/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/gso.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 *	struct udp_skb_cb - UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
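
/* Example (illustrative sketch): UDP-Lite reaches its private CB state
 * through this cast, e.g. to react to a datagram with partial checksum
 * coverage; trim_to_coverage() is a hypothetical helper:
 *
 *	u16 cscov = UDP_SKB_CB(skb)->cscov;
 *
 *	if (UDP_SKB_CB(skb)->partial_cov && cscov < skb->len)
 *		trim_to_coverage(skb, cscov);
 */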

/**
 *	struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
 *
 *	@head:	head of list of sockets
 *	@nulls_head:	head of list of sockets, only used by hash4
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	union {
		struct hlist_head	head;
		/* hash4 uses hlist_nulls to avoid moving wrongly onto another
		 * hlist, because rehash() can happen concurrently with lookup().
		 */
		struct hlist_nulls_head	nulls_head;
	};
	int			count;
	spinlock_t		lock;
} __aligned(2 * sizeof(long));

/**
 *	struct udp_hslot_main - UDP hash slot used by udp_table.hash2
 *
 *	@hslot:	basic hash slot
 *	@hash4_cnt: number of sockets in hslot4 of the same
 *		    (local port, local address)
 */
struct udp_hslot_main {
	struct udp_hslot	hslot; /* must be the first member */
#if !IS_ENABLED(CONFIG_BASE_SMALL)
	u32			hash4_cnt;
#endif
} __aligned(2 * sizeof(long));
#define UDP_HSLOT_MAIN(__hslot)	((struct udp_hslot_main *)(__hslot))
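
/* Example (illustrative sketch): the cast is only valid for slots taken
 * from udp_table.hash2, where each entry really is a udp_hslot_main and
 * @hslot is its first member; with CONFIG_BASE_SMALL disabled:
 *
 *	u32 cnt = UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
 */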

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@hash4:	hash table, connected sockets are hashed on
 *		(local port, local address, remote port, remote address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot_main	*hash2;
#if !IS_ENABLED(CONFIG_BASE_SMALL)
	struct udp_hslot	*hash4;
#endif
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     const struct net *net,
					     unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
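
/* Example (illustrative sketch): walking a primary-hash chain under the
 * slot lock, as bind()-style code does; "port" and do_something() are
 * placeholders:
 *
 *	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, port);
 *	struct sock *sk;
 *
 *	spin_lock_bh(&hslot->lock);
 *	sk_for_each(sk, &hslot->head)
 *		do_something(sk);
 *	spin_unlock_bh(&hslot->lock);
 */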

/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(); this explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask].hslot;
}
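
/* Example (illustrative sketch): callers mix the netns into the
 * (local address, local port) hash before indexing, e.g. for IPv4,
 * with laddr/lport the bound address and port:
 *
 *	unsigned int hash2 = ipv4_portaddr_hash(net, laddr, lport);
 *	struct udp_hslot *hslot2 = udp_hashslot2(table, hash2);
 */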

#if IS_ENABLED(CONFIG_BASE_SMALL)
static inline void udp_table_hash4_init(struct udp_table *table)
{
}

static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
					      unsigned int hash)
{
	BUILD_BUG();
	return NULL;
}

static inline bool udp_hashed4(const struct sock *sk)
{
	return false;
}

static inline unsigned int udp_hash4_slot_size(void)
{
	return 0;
}

static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
	return false;
}

static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
}

static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
}
#else /* !CONFIG_BASE_SMALL */

/* Must be called with table->hash2 initialized */
static inline void udp_table_hash4_init(struct udp_table *table)
{
	table->hash4 = (void *)(table->hash2 + (table->mask + 1));
	for (int i = 0; i <= table->mask; i++) {
		table->hash2[i].hash4_cnt = 0;

		INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
		table->hash4[i].count = 0;
		spin_lock_init(&table->hash4[i].lock);
	}
}

static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash4[hash & table->mask];
}

static inline bool udp_hashed4(const struct sock *sk)
{
	return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
}

static inline unsigned int udp_hash4_slot_size(void)
{
	return sizeof(struct udp_hslot);
}

static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
{
	return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
}

static inline void udp_hash4_inc(struct udp_hslot *hslot2)
{
	UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
}

static inline void udp_hash4_dec(struct udp_hslot *hslot2)
{
	UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
}
#endif /* CONFIG_BASE_SMALL */

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;
DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
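
/* Example (illustrative sketch): receive slow paths verify the datagram
 * right before copying it to userspace, along the lines of:
 *
 *	if (udp_lib_checksum_complete(skb))
 *		goto csum_error;
 */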

/**
 *	udp_csum_outgoing - compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
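
/* Example (illustrative sketch): finishing the checksum of a locally
 * built, non-offloaded datagram; a zero result must be mangled because
 * zero means "no checksum" on the wire:
 *
 *	uh->check = udp_v4_check(len, saddr, daddr,
 *				 csum_partial(uh, len, 0));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */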

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

void udp_v6_early_demux(struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline void udp_lib_init_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	skb_queue_head_init(&up->reader_queue);
	up->forward_threshold = sk->sk_rcvbuf >> 2;
	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4);
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
		const __be32 faddr, const __be16 fport);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

/* hash4 routines shared between UDPv4/6 */
#if IS_ENABLED(CONFIG_BASE_SMALL)
static inline void udp_lib_hash4(struct sock *sk, u16 hash)
{
}

static inline void udp4_hash4(struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
void udp_lib_hash4(struct sock *sk, u16 hash);
void udp4_hash4(struct sock *sk);
#endif /* CONFIG_BASE_SMALL */

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire obfuscate hash a bit
	 * to minimize possibility that any useful information to an
	 * attacker is leaked. Only upper 16 bits are relevant in the
	 * computation for 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
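
/* Example (illustrative sketch): UDP tunnel drivers use this to derive
 * the outer source port from the inner flow hash, spreading flows over
 * ECMP paths; min == max == 0 selects the netns default port range:
 *
 *	__be16 sport = udp_flow_src_port(net, skb, 0, 0, true);
 */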

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off,
			       int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, &off, err);
}
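
/* Example (illustrative sketch): in-kernel UDP consumers typically
 * dequeue without blocking:
 *
 *	int err;
 *	struct sk_buff *skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
 *
 *	if (!skb)
 *		return err;
 */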

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
void udp_splice_eof(struct socket *sock);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
			       __be16 sport, __be32 daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(const struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
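
/* Example (illustrative sketch): an ICMP error handler can locate the
 * socket owning the offending datagram roughly like:
 *
 *	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
 *			       iph->saddr, uh->source,
 *			       skb->dev->ifindex, inet_sdif(skb),
 *			       &udp_table, NULL);
 */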

/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possible multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has been
	 * already validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT;
}

/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite)	      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite)		      do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
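
/* Example (illustrative sketch): the receive path can bump the right
 * per-netns MIB counter without caring about v4/v6 or UDP-Lite:
 *
 *	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 */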

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
	struct udp_table		*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed CHECKSUM_NONE is changed to CHECKSUM_UNNECESSARY.
	 * Reset in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}

static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach a UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 * Additionally fixup the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;

int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif	/* _UDP_H */