// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
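
/* Editor's note: a minimal sketch of how the precomputed partial hash is
 * meant to be completed once a port is chosen. This is illustrative only;
 * the real completion happens inside udp_lib_get_port() in net/ipv4/udp.c,
 * which in this kernel era XORs the bound port into udp_portaddr_hash:
 *
 *	static inline void example_complete_portaddr_hash(struct sock *sk,
 *							  unsigned short snum)
 *	{
 *		// hash2_partial above was ipv6_portaddr_hash(net, saddr, 0)
 *		udp_sk(sk)->udp_portaddr_hash ^= snum;
 *	}
 *
 * Precomputing the address part means binding only has to mix in the
 * 16-bit port.
 */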
void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			if (sk->sk_reuseport &&
			    sk->sk_state != TCP_ESTABLISHED) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result && !reuseport_has_conns(sk, false))
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!result) {
		hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
		slot2 = hash2 & udptable->mask;

		hslot2 = &udptable->hash2[slot2];

		result = udp6_lib_lookup2(net, saddr, sport,
					  &in6addr_any, hnum, dif, sdif,
					  hslot2, skb);
	}
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
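
/* Editor's note: a hedged usage sketch for the lookup above. Callers must
 * hold rcu_read_lock() across the call; on success the socket's refcount
 * has already been taken, so the caller owns a reference and must drop it
 * with sock_put() when done (example_* names below are hypothetical):
 *
 *	struct sock *example_find_udp6_sk(struct net *net,
 *					  const struct in6_addr *saddr, __be16 sport,
 *					  const struct in6_addr *daddr, __be16 dport,
 *					  int dif)
 *	{
 *		struct sock *sk;
 *
 *		rcu_read_lock();
 *		sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, dif);
 *		rcu_read_unlock();
 *		// sk, if non-NULL, is refcounted: release with sock_put(sk)
 *		return sk;
 *	}
 */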
/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}
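
/* Editor's note: for context, inet6_is_jumbogram() only tests a flag in the
 * skb control block, roughly:
 *
 *	return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
 *
 * which is why the check above is cheap: the flags sit in the first
 * cacheline, unlike a full length field would.
 */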
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);
/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}
static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
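
/* Editor's note: a zero UDP checksum is normally invalid over IPv6
 * (RFC 2460), but RFC 6935/6936 allow it for tunnels when the receiver
 * opts in. A hedged userspace-style sketch of opting in on a tunnel
 * socket, using the real UDP_NO_CHECK6_RX socket option:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one));
 *
 * Sockets without that option set have zero-checksum packets rejected
 * via the report_csum_error path in __udp6_lib_rcv().
 */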
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}
/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
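
/* Editor's note: a small worked sketch of the checksum algebra used
 * above, under the assumption that csum_ipv6_magic() folds the IPv6
 * pseudo-header into a running sum. The full software equivalent of
 * the single-fragment case would be roughly:
 *
 *	__wsum payload = skb_checksum(skb, offset, skb->len - offset, 0);
 *	__sum16 check = csum_ipv6_magic(saddr, daddr, len,
 *					IPPROTO_UDP, payload);
 *
 * The ~csum_ipv6_magic(..., 0) stored above is that computation with the
 * payload sum left for the hardware to add, starting at csum_start.
 */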
/*
 *	Sending
 */
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock  *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
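
/* Editor's note: a hedged userspace-style sketch of the corking flow the
 * function above implements. Sending with MSG_MORE (or with the UDP_CORK
 * socket option) takes the locked slow path and appends to the pending
 * frame; a final send without MSG_MORE pushes everything out:
 *
 *	sendto(fd, part1, n1, MSG_MORE, (struct sockaddr *)&dst, dlen);
 *	sendto(fd, part2, n2, 0, (struct sockaddr *)&dst, dlen);
 *	// both parts leave the host as one UDP datagram
 *
 * The uncorked case instead runs the lockless fast path via
 * ip6_make_skb() followed by udp_v6_send_skb().
 */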
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}
/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux	=	udp_v6_early_demux,
	.early_demux_handler =	udp_v6_early_demux,
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}
const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */
/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}