// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <linux/btf_ids.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);

#define MAX_UDP_PORTS		65536
#define PORTS_PER_CHAIN		(MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN_PERNET)
static struct udp_table *udp_get_table_prot(struct sock *sk)
{
	return sk->sk_prot->h.udp_table ? : sock_net(sk)->ipv4.udp_table;
}
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_table *udptable = udp_get_table_prot(sk);
	struct udp_hslot *hslot, *hslot2;
	struct net *net = sock_net(sk);
	int error = -EADDRINUSE;

	if (!snum) {
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
		unsigned short first, last;
		int low, high, remaining;
		u32 rand;

		inet_sk_get_local_port_range(sk, &low, &high);
		remaining = (high - low) + 1;

		rand = get_random_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sock_set_flag(sk, SOCK_RCU_FREE);

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}

	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
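
/* Worked example of the scan above (illustrative numbers only): with a
 * 256-slot table (mask = 255) and "rand" forced to an odd multiple of 256,
 * every candidate produced by "snum += rand" stays inside the hash chain
 * that is currently locked, while its low bits still walk through all
 * offsets of that chain. Advancing "first" from first to first + 256 then
 * covers the whole [low, high] range without holding more than one chain
 * lock at a time.
 */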
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static int compute_score(struct sock *sk, const struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score += 4;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;
	return score;
}
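
/* Scoring sketch, based on the weights above: a socket bound only to the
 * local address gets the base score, while a connected socket that also
 * matches the remote address, the remote port and a bound device adds +4
 * for each exact match, so the most specific candidate always wins in
 * udp4_lib_lookup2().
 */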
static u32 udp_ehash_secret __read_mostly;

u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport)
{
	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL(udp_ehashfn);
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
						       saddr, sport, daddr, hnum, udp_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long of a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some workloads.
			 * Work around it by jumping backwards to
			 * rescore 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}
#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp4_lib_lookup4(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_table *udptable)
{
	return NULL;
}

static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
			u16 newhash4)
{
}

static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp4_lib_lookup4(const struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_table *udptable)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct udp_hslot *hslot4;
	unsigned int hash4, slot;
	struct udp_sock *up;
	struct sock *sk;

	hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash4 & udptable->mask;
	hslot4 = &udptable->hash4[slot];
	INET_ADDR_COOKIE(acookie, saddr, daddr);

begin:
	/* SLAB_TYPESAFE_BY_RCU not used, so we don't need to touch sk_refcnt */
	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
		sk = (struct sock *)up;
		if (inet_match(net, sk, acookie, ports, dif, sdif))
			return sk;
	}

	/* if the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart lookup. We probably met an item that
	 * was moved to another chain due to rehash.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	return NULL;
}
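
/* The nulls marker that terminates each hash4 chain encodes the slot index,
 * so a lockless walk that was migrated to another chain by a concurrent
 * rehash ends on a marker whose value differs from "slot" and simply
 * restarts. This is the usual hlist_nulls pattern used by the other
 * RCU-based socket lookups as well.
 */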
/* In hash4, rehash can happen in connect(), where hash4_cnt keeps unchanged. */
static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
			u16 newhash4)
{
	struct udp_hslot *hslot4, *nhslot4;

	hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);
	nhslot4 = udp_hashslot4(udptable, newhash4);
	udp_sk(sk)->udp_lrpa_hash = newhash4;

	if (hslot4 != nhslot4) {
		spin_lock_bh(&hslot4->lock);
		hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
		hslot4->count--;
		spin_unlock_bh(&hslot4->lock);

		spin_lock_bh(&nhslot4->lock);
		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
					 &nhslot4->nulls_head);
		nhslot4->count++;
		spin_unlock_bh(&nhslot4->lock);
	}
}
static void udp_unhash4(struct udp_table *udptable, struct sock *sk)
{
	struct udp_hslot *hslot2, *hslot4;

	if (udp_hashed4(sk)) {
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash);

		spin_lock(&hslot4->lock);
		hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node);
		hslot4->count--;
		spin_unlock(&hslot4->lock);

		spin_lock(&hslot2->lock);
		udp_hash4_dec(hslot2);
		spin_unlock(&hslot2->lock);
	}
}
void udp_lib_hash4(struct sock *sk, u16 hash)
{
	struct udp_hslot *hslot, *hslot2, *hslot4;
	struct net *net = sock_net(sk);
	struct udp_table *udptable;

	/* Connected udp socket can re-connect to another remote address,
	 * so rehash4 is needed.
	 */
	udptable = net->ipv4.udp_table;
	if (udp_hashed4(sk)) {
		udp_rehash4(udptable, sk, hash);
		return;
	}

	hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
	hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
	hslot4 = udp_hashslot4(udptable, hash);
	udp_sk(sk)->udp_lrpa_hash = hash;

	spin_lock_bh(&hslot->lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	spin_lock(&hslot4->lock);
	hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node,
				 &hslot4->nulls_head);
	hslot4->count++;
	spin_unlock(&hslot4->lock);

	spin_lock(&hslot2->lock);
	udp_hash4_inc(hslot2);
	spin_unlock(&hslot2->lock);

	spin_unlock_bh(&hslot->lock);
}
EXPORT_SYMBOL(udp_lib_hash4);
/* call with sock lock */
void udp4_hash4(struct sock *sk)
{
	struct net *net = sock_net(sk);
	unsigned int hash;

	if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY))
		return;

	hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num,
			   sk->sk_daddr, sk->sk_dport);

	udp_lib_hash4(sk, hash);
}
EXPORT_SYMBOL(udp4_hash4);
#endif /* CONFIG_BASE_SMALL */
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	struct udp_hslot *hslot2;
	struct sock *result, *sk;
	unsigned int hash2;

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	if (udp_has_hash4(hslot2)) {
		result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum,
					  dif, sdif, udptable);
		if (result) /* udp4_lib_lookup4 returns sk or NULL */
			return result;
	}

	/* Lookup connected or non-wildcard socket */
	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
					       saddr, sport, daddr, hnum, dif,
					       udp_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	result = udp4_lib_lookup2(net, saddr, sport,
				  htonl(INADDR_ANY), hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
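
/* Lookup order above, summarized: the exact four-tuple table (hash4) is
 * tried first when the secondary slot advertises connected sockets, then
 * the port+local-address table (hash2) for connected or non-wildcard
 * sockets, then an optional BPF sk_lookup redirect, and finally the
 * INADDR_ANY (wildcard) hash2 chain.
 */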
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet_get_iif_sdif(skb, &iif, &sdif);

	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
EXPORT_SYMBOL(udp_encap_needed_key);

#if IS_ENABLED(CONFIG_IPV6)
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
EXPORT_SYMBOL(udpv6_encap_needed_key);
#endif

void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

void udp_encap_disable(void)
{
	static_branch_dec(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_disable);
/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb, u32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
						  info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
						  (u8 *)(uh + 1));
		goto out;
	}
	if (!inet_test_bit(RECVERR, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}
int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, dev_net(skb->dev)->ipv4.udp_table);
}
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
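
/* Example of the frag-list case above: for a corked send split over several
 * chained skbs, the loop folds each fragment's csum into "csum" and the
 * remaining "hlen" head bytes are summed by skb_checksum(), so uh->check
 * ends up covering the complete UDP payload plus the pseudo-header exactly
 * as if the datagram had been a single linear buffer.
 */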
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);

			/* Don't checksum the payload, skb will get segmented */
			goto csum_partial;
		}
	}

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS &&
		    !inet_test_bit(RECVERR, sk)) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}
*sk
, struct msghdr
*msg
, u16
*gso_size
)
1195 struct cmsghdr
*cmsg
;
1196 bool need_ip
= false;
1199 for_each_cmsghdr(cmsg
, msg
) {
1200 if (!CMSG_OK(msg
, cmsg
))
1203 if (cmsg
->cmsg_level
!= SOL_UDP
) {
1208 err
= __udp_cmsg_send(cmsg
, gso_size
);
1215 EXPORT_SYMBOL_GPL(udp_cmsg_send
);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	u8 tos, scope;
	__be16 dport;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;
	int uc_index;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (READ_ONCE(up->pending)) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = READ_ONCE(up->gso_size);

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin,
					    &msg->msg_namelen,
					    &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	scope = ip_sendmsg_scope(inet, &ipc, msg);
	if (scope == RT_SCOPE_LINK)
		connected = 0;

	uc_index = READ_ONCE(inet->uc_index);
	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = READ_ONCE(inet->mc_index);
		if (!saddr)
			saddr = READ_ONCE(inet->mc_addr);
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = uc_index;
	} else if (ipv4_is_lbcast(daddr) && uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      uc_index)) {
			ipc.oif = uc_index;
		}
	}

	if (connected)
		rt = dst_rtable(sk_dst_check(sk, 0));

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope,
				   sk->sk_protocol, flow_flags, faddr, saddr,
				   dport, inet->inet_sport, sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	WRITE_ONCE(up->pending, AF_INET);

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
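
/* Typical fast path through udp_sendmsg(): an un-corked send on a connected
 * socket reuses the cached route via sk_dst_check(), builds a single skb
 * with ip_make_skb() and hands it straight to udp_send_skb() without ever
 * taking the socket lock. Only UDP_CORK/MSG_MORE users fall back to the
 * corked path that is serialized by lock_sock().
 */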
void udp_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_push_pending_frames(sk);
	release_sock(sk);
}
EXPORT_SYMBOL_GPL(udp_splice_eof);
#define UDP_SKB_IS_STATELESS 0x80000000

/* all head states (dst, sk, nf conntrack) except skb extensions are
 * cleared by udp_rcv().
 *
 * We need to preserve secpath, if present, to eventually process
 * IP_CMSG_PASSSEC at recvmsg() time.
 *
 * Other extensions can be cleared.
 */
static bool udp_try_make_stateless(struct sk_buff *skb)
{
	if (!skb_has_extensions(skb))
		return true;

	if (!secpath_exists(skb)) {
		skb_ext_reset(skb);
		return true;
	}

	return false;
}
*skb
)
1537 struct udp_dev_scratch
*scratch
= udp_skb_scratch(skb
);
1539 BUILD_BUG_ON(sizeof(struct udp_dev_scratch
) > sizeof(long));
1540 scratch
->_tsize_state
= skb
->truesize
;
1541 #if BITS_PER_LONG == 64
1542 scratch
->len
= skb
->len
;
1543 scratch
->csum_unnecessary
= !!skb_csum_unnecessary(skb
);
1544 scratch
->is_linear
= !skb_is_nonlinear(skb
);
1546 if (udp_try_make_stateless(skb
))
1547 scratch
->_tsize_state
|= UDP_SKB_IS_STATELESS
;
static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
{
	/* We come here after udp_lib_checksum_complete() returned 0.
	 * This means that __skb_checksum_complete() might have
	 * set skb->csum_valid to 1.
	 * On 64bit platforms, we can set csum_unnecessary
	 * to true, but only if the skb is not shared.
	 */
#if BITS_PER_LONG == 64
	if (!skb_shared(skb))
		udp_skb_scratch(skb)->csum_unnecessary = true;
#endif
}
*skb
)
1566 return udp_skb_scratch(skb
)->_tsize_state
& ~UDP_SKB_IS_STATELESS
;
1569 static bool udp_skb_has_head_state(struct sk_buff
*skb
)
1571 return !(udp_skb_scratch(skb
)->_tsize_state
& UDP_SKB_IS_STATELESS
);
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < READ_ONCE(up->forward_threshold) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk_forward_alloc_add(sk, size);
	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
	sk_forward_alloc_add(sk, -amt);

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}
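
/* Illustrative behaviour of the deficit accounting above: while "partial"
 * releases keep the freed truesize below up->forward_threshold and the
 * reader queue still holds packets, only forward_deficit is updated; the
 * more expensive sk_forward_alloc trim and __sk_mem_reduce_allocated()
 * call happen once per batch rather than once per skb.
 */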
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}
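
/* Sketch of the intended use: a producer flooding one socket first spins on
 * the busylock picked by hash_ptr(sk, udp_busylocks_log), so at most one
 * such producer contends with the consumer for sk_receive_queue.lock at any
 * time. Uncontended sockets skip the busylock entirely, because
 * busylock_acquire() is only called once the receive queue is already more
 * than half full.
 */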
static int udp_rmem_schedule(struct sock *sk, int size)
{
	int delta;

	delta = size - sk->sk_forward_alloc;
	if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
		return -ENOBUFS;

	return 0;
}
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, err = -ENOMEM;
	spinlock_t *busy = NULL;
	bool becomes_readable;
	int size, rcvbuf;

	/* Immediately drop when the receive queue is full.
	 * Always allow at least one packet.
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	if (rmem > rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	atomic_add(size, &sk->sk_rmem_alloc);

	spin_lock(&list->lock);
	err = udp_rmem_schedule(sk, size);
	if (err) {
		spin_unlock(&list->lock);
		goto uncharge_drop;
	}

	sk_forward_alloc_add(sk, -size);

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	becomes_readable = skb_queue_empty(list);
	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD)) {
		if (becomes_readable ||
		    sk->sk_data_ready != sock_def_readable ||
		    READ_ONCE(sk->sk_peek_off) >= 0)
			INDIRECT_CALL_1(sk->sk_data_ready,
					sock_def_readable, sk);
		else
			sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
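
/* Accounting note for the enqueue path above: truesize is charged to
 * sk_rmem_alloc before the queue lock is taken, and the same value is
 * cached in skb->dev_scratch by udp_set_dev_scratch(), so the dequeue side
 * (udp_skb_destructor) can release exactly what was charged without
 * dereferencing skb->truesize under the receive queue lock.
 */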
void udp_destruct_common(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);
}
EXPORT_SYMBOL_GPL(udp_destruct_common);

static void udp_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet_sock_destruct(sk);
}
*sk
)
1768 udp_lib_init_sock(sk
);
1769 sk
->sk_destruct
= udp_destruct_sock
;
1770 set_bit(SOCK_SUPPORT_ZC
, &sk
->sk_socket
->flags
);
1774 void skb_consume_udp(struct sock
*sk
, struct sk_buff
*skb
, int len
)
1776 if (unlikely(READ_ONCE(udp_sk(sk
)->peeking_with_offset
)))
1777 sk_peek_offset_bwd(sk
, len
);
1779 if (!skb_unref(skb
))
1782 /* In the more common cases we cleared the head states previously,
1783 * see __udp_queue_rcv_skb().
1785 if (unlikely(udp_skb_has_head_state(skb
)))
1786 skb_release_head_state(skb
);
1787 __consume_stateless_skb(skb
);
1789 EXPORT_SYMBOL_GPL(skb_consume_udp
);
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			udp_skb_csum_unnecessary_set(skb);
			break;
		}
	}
	return skb;
}
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty_lockless(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, int *karg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		*karg = sk_wmem_alloc_get(sk);
		return 0;
	}

	case SIOCINQ:
	{
		*karg = max_t(int, 0, first_packet_length(sk));
		return 0;
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb) {
				if (!(flags & MSG_PEEK))
					udp_skb_destructor(sk, skb);
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty_lockless(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags, off,
							err, &last);
			if (skb && !(flags & MSG_PEEK))
				udp_skb_dtor_locked(sk, skb);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty_lockless(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);
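
/* Two-queue design in brief: packets are appended to sk_receive_queue by
 * the softirq producer, while recvmsg() consumes from the reader_queue and
 * only splices the receive queue into it (taking both locks once) when the
 * reader queue runs dry. This keeps producer and consumer off the same
 * spinlock in the common case.
 */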
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	int err;

try_again:
	skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (udp_lib_checksum_complete(skb)) {
		int is_udplite = IS_UDPLITE(sk);
		struct net *net = sock_net(sk);

		__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
		__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		goto try_again;
	}

	WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
	return recv_actor(sk, skb);
}
EXPORT_SYMBOL(udp_read_skb);
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
		int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);

		BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (inet_cmsg_flags(inet))
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
*sk
, struct sockaddr
*uaddr
, int addr_len
)
2085 /* This check is replicated from __ip4_datagram_connect() and
2086 * intended to prevent BPF program called below from accessing bytes
2087 * that are out of the bound specified by user in addr_len.
2089 if (addr_len
< sizeof(struct sockaddr_in
))
2092 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk
, uaddr
, &addr_len
);
2094 EXPORT_SYMBOL(udp_pre_connect
);
static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (!res)
		udp4_hash4(sk);
	release_sock(sk);
	return res;
}
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
		inet_reset_saddr(sk);
		if (sk->sk_prot->rehash &&
		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			sk->sk_prot->rehash(sk);
	}

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);
int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);

			udp_unhash4(udptable, sk);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = udp_get_table_prot(sk);
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			if (udp_hashed4(sk)) {
				udp_rehash4(udptable, sk, newhash4);

				if (hslot2 != nhslot2) {
					spin_lock(&hslot2->lock);
					udp_hash4_dec(hslot2);
					spin_unlock(&hslot2->lock);

					spin_lock(&nhslot2->lock);
					udp_hash4_inc(nhslot2);
					spin_unlock(&nhslot2->lock);
				}
			}
			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);
void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	u16 new_hash4 = udp_ehashfn(sock_net(sk),
				    sk->sk_rcv_saddr, sk->sk_num,
				    sk->sk_daddr, sk->sk_dport);

	udp_lib_rehash(sk, new_hash, new_hash4);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		int drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
				      is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}

	if (static_branch_unlikely(&udp_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb, true);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}
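/* Illustrative sketch (not part of this file): a made-up encap_rcv
 * implementation that follows the return-value contract documented above.
 * The tunnel-header length (4) and the function name are assumptions.
 *
 *	static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		// Too short to carry our tunnel header: hand it to plain UDP.
 *		if (!pskb_may_pull(skb, sizeof(struct udphdr) + 4))
 *			return 1;		// >0: pass on to UDP
 *
 *		consume_skb(skb);		// we own the skb from here on
 *		return 0;			// =0: handled (or discarded) here
 *	}
 */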
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
	}
	return 0;
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst)));
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
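/* Illustrative sketch (not part of this file): the reader side that pairs
 * with the xchg() publication above simply dereferences the cached dst under
 * RCU, much like the early-demux path further down does.
 *
 *	rcu_read_lock();
 *	dst = rcu_dereference(sk->sk_rx_dst);
 *	if (dst)
 *		dst = dst_check(dst, 0);	// still valid?
 *	rcu_read_unlock();
 */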
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2].hslot;
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
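/* Illustrative sketch (not part of this file): two processes each receive a
 * copy of the same multicast datagram once both join the group and bind the
 * shared port; the group address and port below are made up.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int one = 1;
 *	struct ip_mreq mreq = { 0 };
 *	struct sockaddr_in any = { .sin_family = AF_INET,
 *				   .sin_port = htons(9999),
 *				   .sin_addr.s_addr = htonl(INADDR_ANY) };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&any, sizeof(any));
 *	inet_pton(AF_INET, "239.1.2.3", &mreq.imr_multiaddr);
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */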
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}
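/* Illustrative sketch (not part of this file): the pseudo-header folding the
 * code above relies on can be reproduced with the kernel's checksum helpers;
 * a full UDP checksum over an in-memory header + payload looks roughly like
 * this (all names below are local assumptions).
 *
 *	__wsum csum = csum_partial(uh, ulen, 0);	// udp header + payload
 *	__sum16 check = csum_tcpudp_magic(saddr, daddr, ulen,
 *					  IPPROTO_UDP, csum);
 *	// a result of 0 must be transmitted as CSUM_MANGLED_0 (0xffff)
 */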
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk = NULL;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);
	bool refcounted;
	int drop_reason;

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = inet_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			     &refcounted, udp_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk)
		return udp_unicast_rcv_skb(sk, skb, uh);
no_sk:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	drop_reason = SKB_DROP_REASON_NO_SOCKET;
	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got an UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;

short_packet:
	drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;
}
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket found returns NULL
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	struct sock *sk, *result;
	struct udp_hslot *hslot;
	unsigned int slot;

	slot = udp_hashfn(net, hnum, udptable->mask);
	hslot = &udptable->hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups. The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	unsigned short hnum = ntohs(loc_port);
	struct udp_hslot *hslot2;
	unsigned int hash2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (inet_match(net, sk, acookie, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk)
		return 0;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr,
						     ip4h_dscp(iph),
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk))
			static_branch_dec(&udp_encap_needed_key);
	}
}
static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family,
					struct sock *sk)
{
	if (udp_test_bit(GRO_ENABLED, sk) && encap_type == UDP_ENCAP_ESPINUDP) {
		if (family == AF_INET)
			WRITE_ONCE(udp_sk(sk)->gro_receive, xfrm4_gro_udp_encap_rcv);
		else if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6)
			WRITE_ONCE(udp_sk(sk)->gro_receive, ipv6_stub->xfrm6_gro_udp_encap_rcv);
	}
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (level == SOL_SOCKET) {
		err = sk_setsockopt(sk, level, optname, optval, optlen);

		if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
			sockopt_lock_sock(sk);
			/* paired with READ_ONCE in udp_rmem_release() */
			WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
			sockopt_release_sock(sk);
		}
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			udp_set_bit(CORK, sk);
		} else {
			udp_clear_bit(CORK, sk);
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
			set_xfrm_gro_udp_encap_rcv(val, sk->sk_family, sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == AF_INET6)
				WRITE_ONCE(up->encap_rcv,
					   ipv6_stub->xfrm6_udp_encap_rcv);
			else
#endif
				WRITE_ONCE(up->encap_rcv,
					   xfrm4_udp_encap_rcv);
			fallthrough;
		case UDP_ENCAP_L2TPINUDP:
			WRITE_ONCE(up->encap_type, val);
			udp_tunnel_encap_enable(sk);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		udp_set_no_check6_tx(sk, valbool);
		break;

	case UDP_NO_CHECK6_RX:
		udp_set_no_check6_rx(sk, valbool);
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		WRITE_ONCE(up->gso_size, val);
		break;

	case UDP_GRO:
		/* when enabling GRO, accept the related GSO packet type */
		if (valbool)
			udp_tunnel_encap_enable(sk);
		udp_assign_bit(GRO_ENABLED, sk, valbool);
		udp_assign_bit(ACCEPT_L4, sk, valbool);
		set_xfrm_gro_udp_encap_rcv(up->encap_type, sk->sk_family, sk);
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		WRITE_ONCE(up->pcslen, val);
		udp_set_bit(UDPLITE_SEND_CC, sk);
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values. */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		WRITE_ONCE(up->pcrlen, val);
		udp_set_bit(UDPLITE_RECV_CC, sk);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
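/* Illustrative sketch (not part of this file): how the options handled above
 * are typically set from user space; the values chosen here are arbitrary.
 *
 *	int gro = 1, gso = 1400, cscov = 16;
 *
 *	// accept coalesced (GRO) input and emit 1400-byte GSO segments
 *	setsockopt(fd, SOL_UDP, UDP_GRO, &gro, sizeof(gro));
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *
 *	// UDP-Lite sockets only: cover just the first 16 bytes with the checksum
 *	setsockopt(lite_fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
 */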
int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case UDP_CORK:
		val = udp_test_bit(CORK, sk);
		break;

	case UDP_ENCAP:
		val = READ_ONCE(up->encap_type);
		break;

	case UDP_NO_CHECK6_TX:
		val = udp_get_no_check6_tx(sk);
		break;

	case UDP_NO_CHECK6_RX:
		val = udp_get_no_check6_rx(sk);
		break;

	case UDP_SEGMENT:
		val = READ_ONCE(up->gso_size);
		break;

	case UDP_GRO:
		val = udp_test_bit(GRO_ENABLED, sk);
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = READ_ONCE(up->pcslen);
		break;

	case UDPLITE_RECV_CSCOV:
		val = READ_ONCE(up->pcrlen);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}
/**
 *	udp_poll - wait for a UDP event.
 *	@file: - file struct
 *	@sock: - socket
 *	@wait: - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	it could get a return from select() indicating data available
 *	but then block when reading it.  Add special case code
 *	to work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	/* psock ingress_msg queue should not contain any bad checksum frames */
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(udp_poll);
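/* Illustrative sketch (not part of this file): the user-space pattern the
 * special case above protects.  With a blocking fd, poll() may report
 * readability, yet a blocking read would stall if the only queued datagram
 * later fails its checksum; the workaround clears POLLIN in that situation.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		char buf[2048];
 *		ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 *		// n < 0 with EAGAIN is still possible on a non-blocking fd
 *	}
 */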
int udp_abort(struct sock *sk, int err)
{
	if (!has_current_bpf_ctx())
		lock_sock(sk);

	/* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
	 * with close()
	 */
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	sk->sk_err = err;
	sk_error_report(sk);
	__udp_disconnect(sk, 0);

out:
	if (!has_current_bpf_ctx())
		release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
struct proto udp_prot = {
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= udp_connect,
	.disconnect		= udp_disconnect,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.splice_eof		= udp_splice_eof,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif
	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static unsigned short seq_file_family(const struct seq_file *seq);
static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
{
	unsigned short family = seq_file_family(seq);

	/* AF_UNSPEC is used as a match all */
	return ((family == AF_UNSPEC || family == sk->sk_family) &&
		net_eq(sock_net(sk), seq_file_net(seq)));
}

#ifdef CONFIG_BPF_SYSCALL
static const struct seq_operations bpf_iter_udp_seq_ops;
#endif
static struct udp_table *udp_get_table_seq(struct seq_file *seq,
					   struct net *net)
{
	const struct udp_seq_afinfo *afinfo;

#ifdef CONFIG_BPF_SYSCALL
	if (seq->op == &bpf_iter_udp_seq_ops)
		return net->ipv4.udp_table;
#endif

	afinfo = pde_data(file_inode(seq->file));
	return afinfo->udp_table ? : net->ipv4.udp_table;
}

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct udp_table *udptable;
	struct sock *sk;

	udptable = udp_get_table_seq(seq, net);

	for (state->bucket = start; state->bucket <= udptable->mask;
	     state->bucket++) {
		struct udp_hslot *hslot = &udptable->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (seq_sk_match(seq, sk))
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct udp_table *udptable;

	do {
		sk = sk_next(sk);
	} while (sk && !seq_sk_match(seq, sk));

	if (!sk) {
		udptable = udp_get_table_seq(seq, net);

		if (state->bucket <= udptable->mask)
			spin_unlock_bh(&udptable->hash[state->bucket].lock);

		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;
	struct udp_table *udptable;

	udptable = udp_get_table_seq(seq, seq_file_net(seq));

	if (state->bucket <= udptable->mask)
		spin_unlock_bh(&udptable->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);
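/* Illustrative sketch (not part of this file): the seq_file hooks above back
 * /proc/net/udp, so a user-space reader is as simple as streaming that file;
 * each line after the header describes one socket in the format emitted by
 * udp4_format_sock() below.
 *
 *	FILE *f = fopen("/proc/net/udp", "r");
 *	char line[512];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */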
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__udp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct udp_sock *, udp_sk);
	uid_t uid __aligned(8);
	int bucket __aligned(8);
};

struct bpf_udp_iter_state {
	struct udp_iter_state state;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	int offset;
	struct sock **batch;
	bool st_bucket_done;
};

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz);
static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct udp_iter_state *state = &iter->state;
	struct net *net = seq_file_net(seq);
	int resume_bucket, resume_offset;
	struct udp_table *udptable;
	unsigned int batch_sks = 0;
	bool resized = false;
	struct sock *sk;

	resume_bucket = state->bucket;
	resume_offset = iter->offset;

	/* The current batch is done, so advance the bucket. */
	if (iter->st_bucket_done)
		state->bucket++;

	udptable = udp_get_table_seq(seq, net);

again:
	/* New batch for the next bucket.
	 * Iterate over the hash table to find a bucket with sockets matching
	 * the iterator attributes, and return the first matching socket from
	 * the bucket. The remaining matched sockets from the bucket are batched
	 * before releasing the bucket lock. This allows BPF programs that are
	 * called in seq_show to acquire the bucket lock if needed.
	 */
	iter->cur_sk = 0;
	iter->end_sk = 0;
	iter->st_bucket_done = false;
	batch_sks = 0;

	for (; state->bucket <= udptable->mask; state->bucket++) {
		struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot;

		if (hlist_empty(&hslot2->head))
			continue;

		iter->offset = 0;
		spin_lock_bh(&hslot2->lock);
		udp_portaddr_for_each_entry(sk, &hslot2->head) {
			if (seq_sk_match(seq, sk)) {
				/* Resume from the last iterated socket at the
				 * offset in the bucket before iterator was stopped.
				 */
				if (state->bucket == resume_bucket &&
				    iter->offset < resume_offset) {
					++iter->offset;
					continue;
				}
				if (iter->end_sk < iter->max_sk) {
					sock_hold(sk);
					iter->batch[iter->end_sk++] = sk;
				}
				batch_sks++;
			}
		}
		spin_unlock_bh(&hslot2->lock);

		if (iter->end_sk)
			break;
	}

	/* All done: no batch made. */
	if (!iter->end_sk)
		return NULL;

	if (iter->end_sk == batch_sks) {
		/* Batching is done for the current bucket; return the first
		 * socket to be iterated from the batch.
		 */
		iter->st_bucket_done = true;
		goto done;
	}
	if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) {
		resized = true;
		/* After allocating a larger batch, retry one more time to grab
		 * the whole bucket.
		 */
		goto again;
	}
done:
	return iter->batch[0];
}

static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so unref the iter->cur_sk.
	 */
	if (iter->cur_sk < iter->end_sk) {
		sock_put(iter->batch[iter->cur_sk++]);
		++iter->offset;
	}

	/* After updating iter->cur_sk, check if there are more sockets
	 * available in the current bucket batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		/* Prepare a new batch. */
		sk = bpf_iter_udp_batch(seq);

	++*pos;
	return sk;
}

static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* bpf iter does not support lseek, so it always
	 * continue from where it was stop()-ped.
	 */
	if (*pos)
		return bpf_iter_udp_batch(seq);

	return SEQ_START_TOKEN;
}

static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct udp_sock *udp_sk, uid_t uid, int bucket)
{
	struct bpf_iter__udp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.udp_sk = udp_sk;
	ctx.uid = uid;
	ctx.bucket = bucket;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	lock_sock(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);

unlock:
	release_sock(sk);
	return ret;
}

static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}

static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_udp_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)udp_prog_seq_show(prog, &meta, v, 0, 0);
	}

	if (iter->cur_sk < iter->end_sk) {
		bpf_iter_udp_put_batch(iter);
		iter->st_bucket_done = false;
	}
}

static const struct seq_operations bpf_iter_udp_seq_ops = {
	.start		= bpf_iter_udp_seq_start,
	.next		= bpf_iter_udp_seq_next,
	.stop		= bpf_iter_udp_seq_stop,
	.show		= bpf_iter_udp_seq_show,
};
#endif
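/* Illustrative sketch (not part of this file): a minimal BPF program that
 * could attach to this iterator from user space via libbpf or bpftool; the
 * context fields follow struct bpf_iter__udp above, everything else is an
 * assumption.
 *
 *	SEC("iter/udp")
 *	int dump_udp(struct bpf_iter__udp *ctx)
 *	{
 *		struct udp_sock *udp_sk = ctx->udp_sk;
 *
 *		if (!udp_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "bucket %d uid %u\n",
 *			       ctx->bucket, ctx->uid);
 *		return 0;
 *	}
 */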
static unsigned short seq_file_family(const struct seq_file *seq)
{
	const struct udp_seq_afinfo *afinfo;

#ifdef CONFIG_BPF_SYSCALL
	/* BPF iterator: bpf programs to filter sockets. */
	if (seq->op == &bpf_iter_udp_seq_ops)
		return AF_UNSPEC;
#endif

	/* Proc fs iterator */
	afinfo = pde_data(file_inode(seq->file));
	return afinfo->family;
}

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= NULL,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
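/* Illustrative usage (not part of this file): the handler above parses the
 * "uhash_entries=" kernel command-line parameter, so booting with, e.g.
 *
 *	uhash_entries=4096
 *
 * requests 4096 slots for the global UDP hash table (values that are too
 * small are raised to UDP_HTABLE_SIZE_MIN; the final size is chosen by
 * alloc_large_system_hash() in udp_table_init() below).
 */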
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i, slot_size;

	slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
		    udp_hash4_slot_size();
	table->hash = alloc_large_system_hash(name,
					      slot_size,
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      UDP_HTABLE_SIZE_MAX);

	table->hash2 = (void *)(table->hash + (table->mask + 1));
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].hslot.head);
		table->hash2[i].hslot.count = 0;
		spin_lock_init(&table->hash2[i].hslot.lock);
	}
	udp_table_hash4_init(table);
}
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __net_init udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = PAGE_SIZE;
	net->ipv4.sysctl_udp_wmem_min = PAGE_SIZE;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries)
{
	struct udp_table *udptable;
	unsigned int slot_size;
	int i;

	udptable = kmalloc(sizeof(*udptable), GFP_KERNEL);
	if (!udptable)
		goto out;

	slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) +
		    udp_hash4_slot_size();
	udptable->hash = vmalloc_huge(hash_entries * slot_size,
				      GFP_KERNEL_ACCOUNT);
	if (!udptable->hash)
		goto free_table;

	udptable->hash2 = (void *)(udptable->hash + hash_entries);
	udptable->mask = hash_entries - 1;
	udptable->log = ilog2(hash_entries);

	for (i = 0; i < hash_entries; i++) {
		INIT_HLIST_HEAD(&udptable->hash[i].head);
		udptable->hash[i].count = 0;
		spin_lock_init(&udptable->hash[i].lock);

		INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head);
		udptable->hash2[i].hslot.count = 0;
		spin_lock_init(&udptable->hash2[i].hslot.lock);
	}
	udp_table_hash4_init(udptable);

	return udptable;

free_table:
	kfree(udptable);
out:
	return NULL;
}

static void __net_exit udp_pernet_table_free(struct net *net)
{
	struct udp_table *udptable = net->ipv4.udp_table;

	if (udptable == &udp_table)
		return;

	kvfree(udptable->hash);
	kfree(udptable);
}

static void __net_init udp_set_table(struct net *net)
{
	struct udp_table *udptable;
	unsigned int hash_entries;
	struct net *old_net;

	if (net_eq(net, &init_net))
		goto fallback;

	old_net = current->nsproxy->net_ns;
	hash_entries = READ_ONCE(old_net->ipv4.sysctl_udp_child_hash_entries);
	if (!hash_entries)
		goto fallback;

	/* Set min to keep the bitmap on stack in udp_lib_get_port() */
	if (hash_entries < UDP_HTABLE_SIZE_MIN_PERNET)
		hash_entries = UDP_HTABLE_SIZE_MIN_PERNET;
	else
		hash_entries = roundup_pow_of_two(hash_entries);

	udptable = udp_pernet_table_alloc(hash_entries);
	if (udptable) {
		net->ipv4.udp_table = udptable;
	} else {
		pr_warn("Failed to allocate UDP hash table (entries: %u) "
			"for a netns, fallback to the global one\n",
			hash_entries);
fallback:
		net->ipv4.udp_table = &udp_table;
	}
}

static int __net_init udp_pernet_init(struct net *net)
{
	udp_sysctl_init(net);
	udp_set_table(net);

	return 0;
}

static void __net_exit udp_pernet_exit(struct net *net)
{
	udp_pernet_table_free(net);
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_pernet_init,
	.exit	= udp_pernet_exit,
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
		     struct udp_sock *udp_sk, uid_t uid, int bucket)

static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
				      unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
				   GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_udp_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}

#define INIT_BATCH_SZ 16

static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_udp_iter_state *iter = priv_data;
	int ret;

	ret = bpf_iter_init_seq_net(priv_data, aux);
	if (ret)
		return ret;

	ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ);
	if (ret)
		bpf_iter_fini_seq_net(priv_data);

	return ret;
}

static void bpf_iter_fini_udp(void *priv_data)
{
	struct bpf_udp_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info udp_seq_info = {
	.seq_ops		= &bpf_iter_udp_seq_ops,
	.init_seq_private	= bpf_iter_init_udp,
	.fini_seq_private	= bpf_iter_fini_udp,
	.seq_priv_size		= sizeof(struct bpf_udp_iter_state),
};

static struct bpf_iter_reg udp_reg_info = {
	.target			= "udp",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__udp, udp_sk),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &udp_seq_info,
};

static void __init bpf_iter_register(void)
{
	udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
	if (bpf_iter_reg_target(&udp_reg_info))
		pr_warn("Warning: could not register bpf iterator udp\n");
}
#endif

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}