/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connected sockets.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and
 *		Alexey Kuznetsov:	Support the IPV6_V6ONLY socket option, which
 *					allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"
/*
 *	Snmp MIB for the UDP layer
 */

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

int sysctl_udp_mem[3] __read_mostly;
int sysctl_udp_rmem_min __read_mostly;
int sysctl_udp_wmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_mem);
EXPORT_SYMBOL(sysctl_udp_rmem_min);
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct hlist_head udptable[],
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2))
{
	struct sock *sk2;
	struct hlist_node *node;

	sk_for_each(sk2, node, &udptable[udp_hashfn(net, num)])
		if (net_eq(sock_net(sk2), net)			&&
		    sk2 != sk					&&
		    sk2->sk_hash == num				&&
		    (!sk2->sk_reuse || !sk->sk_reuse)		&&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
			|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (*saddr_comp)(sk, sk2))
			return 1;
	return 0;
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2))
{
	struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
	int error = 1;
	struct net *net = sock_net(sk);

	write_lock_bh(&udp_hash_lock);

	if (!snum) {
		int low, high, remaining;
		unsigned rand;
		unsigned short first;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		rand = net_random();
		snum = first = rand % remaining + low;
		rand |= 1;
		while (udp_lib_lport_inuse(net, snum, udptable, sk,
					   saddr_comp)) {
			do {
				snum = snum + rand;
			} while (snum < low || snum > high);
			if (snum == first)
				goto fail;
		}
	} else if (udp_lib_lport_inuse(net, snum, udptable, sk, saddr_comp))
		goto fail;

	inet_sk(sk)->num = snum;
	sk->sk_hash = snum;
	if (sk_unhashed(sk)) {
		sk_add_node(sk, &udptable[udp_hashfn(net, snum)]);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}
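/*
 * Editor's note (illustrative, not from the original source): the random
 * port walk above relies on `rand` being forced odd.  snum is a 16-bit
 * value, so repeated additions wrap modulo 65536, and an odd stride is
 * co-prime with 65536; the walk therefore visits every possible port once
 * before coming back to `first`, while the inner do/while simply skips
 * values outside [low, high].  For example, with the common range
 * low = 32768, high = 61000 (remaining = 28233):
 *
 *	rand = net_random() | 1;		// odd stride
 *	snum = first = rand % 28233 + 32768;
 *	snum += rand;				// repeat until snum lands
 *						// back inside [low, high]
 */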
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->rcv_saddr || !inet2->rcv_saddr ||
		 inet1->rcv_saddr == inet2->rcv_saddr));
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
/* UDP lookups are nearly always wildcards out the wazoo; it makes no sense
 * to try harder than this. -DaveM
 */
static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	sk_for_each(sk, node, &udptable[udp_hashfn(net, hnum)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
		    !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);
			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}
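/*
 * Editor's note (illustrative, not from the original source): the scoring
 * above starts at 1 for an AF_INET socket and adds 2 for each bound
 * attribute (local address, peer address, peer port, device) that is set
 * and matches the packet.  A fully connected, device-bound IPv4 socket
 * thus scores 1 + 4*2 = 9 and terminates the scan early, while a pure
 * wildcard listener scores only 1 and can be displaced by any closer
 * match later in the chain.
 */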
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct hlist_head udptable[])
{
	struct sock *sk;
	const struct iphdr *iph = ip_hdr(skb);

	if (unlikely(sk = skb_steal_sock(skb)))
		return sk;
	else
		return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport,
					 iph->daddr, dport, inet_iif(skb),
					 udptable);
}
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, udp_hash);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (!net_eq(sock_net(s), net)				||
		    s->sk_hash != hnum					||
		    (inet->daddr && inet->daddr != rmt_addr)		||
		    (inet->dport != rmt_port && inet->dport)		||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)	||
		    ipv6_only_sock(s)					||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			iph->saddr, uh->source, skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}
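/*
 * Editor's note (illustrative userspace sketch, not part of this file):
 * with IP_RECVERR disabled (the default), the error delivered above is
 * only reported on a connected socket.  Sending to a port nobody holds
 * typically surfaces the resulting ICMP port-unreachable as follows
 * (`dst` being a sockaddr_in for the unreachable destination):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	send(fd, "x", 1, 0);		// elicits ICMP from the peer
 *	recv(fd, buf, sizeof(buf), 0);	// fails, errno == ECONNREFUSED
 */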
void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, udp_hash);
}
/*
 * Throw away all pending data and cancel the corking.  Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 *	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
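/*
 * Editor's note (illustrative, not from the original source): in the
 * single-fragment branch above only the IPv4 pseudo-header (saddr, daddr,
 * protocol, UDP length) is summed; uh->check holds its bit-inverted fold
 * so the hardware can continue the sum over the payload.  In the multi-
 * fragment branch the full sum is computed in software, and a result of
 * zero is replaced by CSUM_MANGLED_0 (0xFFFF) because an all-zero
 * checksum field means "no checksum" on the wire for UDP (RFC 768).
 */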
/*
 * Push out all pending data as one UDP datagram.  Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
		goto send;

	} else						 /*   `normal' UDP    */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	if (!err)
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags&MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .flags = inet_sk_flowi_flags(sk),
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		struct net *net = sock_net(sk);

		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(net, &rt, &fl, sk, 1);
		if (err) {
			if (err == -ENETUNREACH)
				IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
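/*
 * Editor's note (illustrative userspace sketch, not part of this file):
 * the pending/corking path above is what the UDP_CORK socket option
 * drives.  Several writes accumulate on sk_write_queue and go out as a
 * single datagram when the cork is removed:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, len1, 0);	// queued, up->pending = AF_INET
 *	send(fd, part2, len2, 0);	// appended to the same datagram
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *					// one datagram hits the wire
 */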
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
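/*
 * Editor's note (illustrative userspace sketch, not part of this file):
 * as the comment above says, SIOCINQ (a.k.a. FIONREAD) on a UDP socket
 * reports the payload size of the next queued datagram only, not the
 * total number of bytes queued:
 *
 *	int pending = 0;
 *	ioctl(fd, SIOCINQ, &pending);
 *	// pending == head skb->len - sizeof(struct udphdr)
 */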
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked;
	int err;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	if (!peeked)
		UDP_INC_STATS_USER(sock_net(sk),
				   UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	lock_sock(sk);
	skb_free_datagram(sk, skb);
	release_sock(sk);
out:
	return err;

csum_copy_err:
	lock_sock(sk);
	if (!skb_kill_datagram(sk, skb, flags))
		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	release_sock(sk);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}
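/*
 * Editor's note (illustrative userspace sketch, not part of this file):
 * when the buffer is smaller than the datagram, the code above copies
 * what fits and sets MSG_TRUNC in msg_flags; if MSG_TRUNC was also passed
 * in the call flags, the full datagram length is returned instead of the
 * copied length:
 *
 *	char buf[16];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// for a 100-byte datagram: n == 100, buf holds the first 16 bytes
 */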
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					 is_udplite);
		goto drop;
	}

	return 0;

drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}
/*
 * returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook.  Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		if (skb->len > sizeof(struct udphdr) &&
		    up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver.  This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	return rc;

drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}
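/*
 * Editor's note (illustrative sketch, not part of this file): the
 * encap_rcv hook consulted above is armed with the UDP_ENCAP socket
 * option.  An IKE daemon doing ESP-in-UDP (RFC 3948), for instance,
 * marks its socket so that non-IKE payloads are diverted to
 * xfrm4_udp_encap_rcv() instead of being queued as ordinary data:
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 */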
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest,
						   daddr, uh->source, saddr,
						   dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/* we should probably re-process instead
					 * of dropping packets here. */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}
/* Initialize UDP checksum.  If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */

	return 0;
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh = udp_hdr(skb);
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket.  Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       NIPQUAD(daddr),
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHORT_MAX)
			val = USHORT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value.  To make
	 * sense, this should be set to at least 8 (as done below).  If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHORT_MAX)
			val = USHORT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
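/*
 * Editor's note (illustrative userspace sketch, not part of this file):
 * the two coverage options above apply only to UDP-Lite sockets.  A
 * sender that wants just the UDP-Lite header plus a 12-byte application
 * header checksummed (coverage counts from the start of the UDP-Lite
 * header) would do:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 8 + 12;
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
 *		   &cov, sizeof(cov));
 */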
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	it could get a return from select() indicating data available
 *	but then block when reading it.  Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(sock_net(sk),
					 UDP_MIB_INERRORS, is_lite);
			__skb_unlink(skb, rcvq);
			kfree_skb(skb);
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = __udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_hash	   = udp_hash,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(udp_hash_lock)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
	__releases(udp_hash_lock)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->hashtable	= afinfo->hashtable;
	return err;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_fops.open		= udp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= udp_seq_start;
	afinfo->seq_ops.next		= udp_seq_next;
	afinfo->seq_ops.stop		= udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket, int *len)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src  = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp  = ntohs(inet->sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops), len);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;
		int len;

		udp4_format_sock(v, seq, state->bucket, &len);
		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_fops	= {
		.owner	=	THIS_MODULE,
	},
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
void __init udp_init(void)
{
	unsigned long limit;

	/* Set the pressure threshold up by the same strategy of TCP.  It is a
	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
	 * toward zero with the amount of memory, with a floor of 128 pages.
	 */
	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
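/*
 * Editor's note (illustrative, not from the original source): worked
 * example of the sizing above for a machine with 1 GB of memory
 * (nr_all_pages = 262144 pages of 4 KB, PAGE_SHIFT = 12):
 *
 *	limit = min(262144, 65536) >> 8		=  256
 *	limit = (256 * (262144 >> 8)) >> 1	=  131072 pages
 *	udp_mem = { 98304, 131072, 196608 }	   min, pressure, max
 */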
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);
EXPORT_SYMBOL(udp_lib_get_port);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif