// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp_ipv6_hash_secret + net_hash_mix(net));
}
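/* Both secrets above are seeded lazily, once per boot, via
 * net_get_random_once(), so ehash bucket placement is stable for the
 * lifetime of the kernel yet unpredictable to off-path senders.  Note that
 * lhash takes only the last 32 bits of the local address, mirroring what the
 * IPv4 ehash sees for v4-mapped addresses.
 */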
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
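/* Summary of the scoring: once the netns, port hash, family and bound local
 * address match, each additional exact match (connected peer port, connected
 * peer address, explicit device bind, same receiving CPU) adds one point,
 * while any mismatch disqualifies the socket outright.  udp6_lib_lookup2()
 * keeps the highest-scoring candidate.
 */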
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
					    saddr, sport, daddr, hnum, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}
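/* The BPF sk_lookup hook only covers the global udp_table: tunnel lookups
 * that pass a private table bypass it entirely, and when the program picks a
 * reuseport group member, lookup_reuseport() above still lets the group's
 * own selection (hash or reuseport BPF program) take effect.
 */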
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
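/* Lookup order, for reference: exact/connected sockets in the daddr:port
 * bucket first, then the BPF sk_lookup redirect (if enabled), then wildcard
 * (in6addr_any) sockets.  A TCP_ESTABLISHED hit returns before the BPF hook
 * runs, so connected sockets can never be redirected away.
 */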
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
/* Do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap.
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
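/* Usage note: with MSG_TRUNC set, the value returned to userspace is the
 * full datagram length rather than the number of bytes copied, so a caller
 * can probe the size with recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC) and then
 * read the datagram for real.
 */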
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);
/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk)
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
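/* Example: a VXLAN endpoint binds and sends to port 4789 on both sides, so
 * the non-reversed lookup above (local port taken from uh->dest instead of
 * uh->source) finds its socket from the ICMP payload; FoU/GUE-style tunnels
 * with asymmetric ports fall through to __udp6_lib_err_encap_no_sk().
 */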
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk || udp_sk(sk)->encap_type) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		else
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}
static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}
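/* udp_rcv_segment() resegments an unexpected GRO aggregate so sockets that
 * never enabled UDP_GRO still observe individual datagrams; a positive
 * return from udpv6_queue_rcv_one_skb() (an encap_rcv handler asking for
 * proto -N resubmission) is honoured per segment via
 * ip6_protocol_deliver_rcu().
 */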
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
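/* The delivery loop above defers the first matching socket and clones the
 * skb only for the second and later listeners, so the common single-listener
 * case needs no clone at all: the original skb is handed to "first" once the
 * scan (including the optional *:port pass) has finished.
 */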
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
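/* Early demux only matches connected (TCP_ESTABLISHED) UDP sockets via
 * __udp6_lib_demux_lookup(); on a hit, the socket and its cached,
 * cookie-validated rx dst are attached to the skb, letting __udp6_lib_rcv()
 * skip both the socket lookup and the routing decision for every later
 * packet of the flow.
 */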
INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}
/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
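/* In the frag_list case the device cannot checksum across fragments, so the
 * payload sum is folded in software and only combined with the pseudo-header
 * at the end.  A result of 0 is written as CSUM_MANGLED_0 (0xffff) because a
 * zero UDP checksum on the wire means "no checksum was computed".
 */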
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
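/* The gso_size checks above enforce the UDP_SEGMENT contract: segment plus
 * headers must fit within the corked fragment size, at most UDP_MAX_SEGMENTS
 * segments may result, and segmentation requires CHECKSUM_PARTIAL, which is
 * why no_check6_tx, UDP-Lite and xfrm-transformed paths are rejected with an
 * error instead of silently sending bad segments.
 */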
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
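/* Note the split above: the common uncorked case builds the whole datagram
 * via ip6_make_skb() and sends it without taking the socket lock, while
 * MSG_MORE / UDP_CORK senders fall back to the locked ip6_append_data()
 * path; corking a socket from two paths at once is flagged as
 * "udp cork app bug 2".
 */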
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}
/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux	=	udp_v6_early_demux,
	.early_demux_handler =	udp_v6_early_demux,
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */
/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};
static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}