/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables ("cb")
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
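/* Illustrative sketch, not part of the original header: how receive-path
 * code can consult the cb area once the UDP(-Lite) header has been
 * validated. The helper name is hypothetical; the real coverage handling
 * lives in net/ipv4/udplite.c and include/net/udplite.h.
 */
static inline bool udp_lite_covers_whole_skb(struct sk_buff *skb)
{
	/* For plain UDP the init path sets cscov to the full length and
	 * clears partial_cov, so the coverage test below is only ever
	 * interesting for UDP-Lite.
	 */
	return !UDP_SKB_CB(skb)->partial_cov ||
	       UDP_SKB_CB(skb)->cscov == skb->len;
}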
/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));
/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net,
					     unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}

/*
 * For secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(), this explains difference with udp_hashslot()
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
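/* Usage sketch, hypothetical helper (not in the original header): per the
 * locking rule documented on struct udp_hslot, a slot's list and count may
 * only be read consistently under its spinlock.
 */
static inline int udp_hslot_socket_count(struct udp_table *table,
					 struct net *net, unsigned int port)
{
	struct udp_hslot *hslot = udp_hashslot(table, net, port);
	int count;

	spin_lock_bh(&hslot->lock);
	count = hslot->count;
	spin_unlock_bh(&hslot->lock);
	return count;
}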
extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;
struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}
static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}
static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
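/* Sketch of how udp_csum() and udp_v4_check() combine on output, in the
 * spirit of udp_send_skb() in net/ipv4/udp.c. The helper is hypothetical;
 * it assumes uh points at the UDP header of skb with the checksum field
 * still zeroed.
 */
static inline void udp_v4_fill_check(struct sk_buff *skb, struct udphdr *uh,
				     __be32 saddr, __be32 daddr)
{
	uh->check = udp_v4_check(skb->len, saddr, daddr, udp_csum(skb));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* 0 on the wire means "no checksum" */
}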
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);
static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);
u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire obfuscate hash a bit
	 * to minimize possibility that any useful information to an
	 * attacker is leaked. Only upper 16 bits are relevant in the
	 * computation for 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
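/* Worked example for the mapping above: with hash = 0x80000000 (2^31) and
 * the range min = 32768, max = 61000, the scaled value is
 * ((2^31 * 28232) >> 32) + 32768 = 14116 + 32768 = 46884, i.e. the upper
 * bits of the hash select a source port uniformly within [min, max).
 */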
static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
 * possibly multiple cache miss on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has been
	 * already validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}
#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif
static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}
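/* Recvmsg-style sketch (hypothetical helper, not in the original header):
 * the scratch accessors let the dequeue path pick a copy strategy without
 * touching cold skb fields, along the lines of udp_recvmsg() in
 * net/ipv4/udp.c.
 */
static inline int udp_copy_to_iter(struct sk_buff *skb, int off,
				   struct iov_iter *to)
{
	int len = udp_skb_len(skb) - off;

	if (udp_skb_is_linear(skb))
		return copy_linear_skb(skb, len, off, to);
	return skb_copy_datagram_iter(skb, off, to, len);
}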
/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite)	      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
} while(0)
#define UDP6_INC_STATS(net, field, __lite)		      do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
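/* Usage sketch (not in the original header): the family-agnostic macro picks
 * the right MIB, so a receive-path counter bump reduces to one line, e.g.
 * when a datagram has been successfully queued.
 */
static inline void udp_count_in_datagram(struct sock *sk)
{
	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
}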
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
	struct udp_table		*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */
int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
	 * specific case, where PARTIAL is both correct and required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lays after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
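/* Caller-side sketch (hypothetical, simplified from udp_queue_rcv_skb() in
 * net/ipv4/udp.c): segment a GSO packet and feed each resulting skb to a
 * per-segment handler. On error udp_rcv_segment() has already dropped and
 * accounted the skb, so the walk simply does not run; the real caller also
 * pulls each segment to its transport header first.
 */
static inline void udp_rcv_segment_each(struct sock *sk, struct sk_buff *skb,
					int (*one)(struct sock *,
						   struct sk_buff *))
{
	struct sk_buff *next, *segs = udp_rcv_segment(sk, skb, true);

	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);
		one(sk, skb);
	}
}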
#ifdef CONFIG_BPF_STREAM_PARSER
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
#endif /* BPF_STREAM_PARSER */

#endif	/* _UDP_H */