/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	$Id: udp.c,v 1.45 1999/08/20 11:06:32 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <asm/uaccess.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/inet_common.h>

#include <net/checksum.h>

struct udp_mib udp_stats_in6;

/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 */
static int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
        write_lock_bh(&udp_hash_lock);
        if (snum == 0) {
                int best_size_so_far, best, result, i;

                if (udp_port_rover > sysctl_local_port_range[1] ||
                    udp_port_rover < sysctl_local_port_range[0])
                        udp_port_rover = sysctl_local_port_range[0];
                best_size_so_far = 32767;
                best = result = udp_port_rover;
                for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
                        struct sock *sk;
                        int size;

                        sk = udp_hash[result & (UDP_HTABLE_SIZE - 1)];
                        if (!sk) {
                                if (result > sysctl_local_port_range[1])
                                        result = sysctl_local_port_range[0] +
                                                ((result - sysctl_local_port_range[0]) &
                                                 (UDP_HTABLE_SIZE - 1));
                                goto gotit;
                        }
                        size = 0;
                        do {
                                if (++size >= best_size_so_far)
                                        goto next;
                        } while ((sk = sk->next) != NULL);
                        best_size_so_far = size;
                        best = result;
                next:;
                }
                result = best;
                for (;; result += UDP_HTABLE_SIZE) {
                        if (result > sysctl_local_port_range[1])
                                result = sysctl_local_port_range[0]
                                        + ((result - sysctl_local_port_range[0]) &
                                           (UDP_HTABLE_SIZE - 1));
                        if (!udp_lport_inuse(result))
                                break;
                }
gotit:
                udp_port_rover = snum = result;
        } else {
                struct sock *sk2;
                int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);

                for (sk2 = udp_hash[snum & (UDP_HTABLE_SIZE - 1)];
                     sk2 != NULL;
                     sk2 = sk2->next) {
                        if (sk2->num == snum &&
                            sk2 != sk &&
                            sk2->bound_dev_if == sk->bound_dev_if &&
                            (!sk2->rcv_saddr ||
                             addr_type == IPV6_ADDR_ANY ||
                             !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
                                            &sk2->net_pinfo.af_inet6.rcv_saddr)) &&
                            (!sk2->reuse || !sk->reuse))
                                goto fail;
                }
        }

        sk->num = snum;
        write_unlock_bh(&udp_hash_lock);
        return 0;

fail:
        write_unlock_bh(&udp_hash_lock);
        return 1;
}
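/*
 * Illustrative sketch, not part of the original file: the bucket and
 * rover arithmetic used above, in isolation.  A local port selects its
 * hash chain through its low bits (UDP_HTABLE_SIZE is a power of two),
 * and an out-of-range candidate is folded back into the sysctl range
 * while keeping its offset from the range start modulo UDP_HTABLE_SIZE.
 * The helper names are ours.
 */
static __inline__ struct sock *udp_v6_example_chain(unsigned short num)
{
        return udp_hash[num & (UDP_HTABLE_SIZE - 1)];
}

static __inline__ int udp_v6_example_wrap(int result)
{
        if (result > sysctl_local_port_range[1])
                result = sysctl_local_port_range[0] +
                        ((result - sysctl_local_port_range[0]) &
                         (UDP_HTABLE_SIZE - 1));
        return result;
}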
static void udp_v6_hash(struct sock *sk)
{
        struct sock **skp = &udp_hash[sk->num & (UDP_HTABLE_SIZE - 1)];

        write_lock_bh(&udp_hash_lock);
        if ((sk->next = *skp) != NULL)
                (*skp)->pprev = &sk->next;
        *skp = sk;
        sk->pprev = skp;
        sk->prot->inuse++;
        if (sk->prot->highestinuse < sk->prot->inuse)
                sk->prot->highestinuse = sk->prot->inuse;
        write_unlock_bh(&udp_hash_lock);
}

static void udp_v6_unhash(struct sock *sk)
{
        write_lock_bh(&udp_hash_lock);
        if (sk->pprev) {
                if (sk->next)
                        sk->next->pprev = sk->pprev;
                *sk->pprev = sk->next;
                sk->pprev = NULL;
                sk->num = 0;
                sk->prot->inuse--;
        }
        write_unlock_bh(&udp_hash_lock);
}

static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport,
                                  struct in6_addr *daddr, u16 dport, int dif)
{
        struct sock *sk, *result = NULL;
        unsigned short hnum = ntohs(dport);
        int badness = -1;

        read_lock(&udp_hash_lock);
        for (sk = udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]; sk != NULL; sk = sk->next) {
                if ((sk->num == hnum) &&
                    (sk->family == PF_INET6)) {
                        struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
                        int score = 0;
                        if (sk->dport) {
                                if (sk->dport != sport)
                                        continue;
                                score++;
                        }
                        if (!ipv6_addr_any(&np->rcv_saddr)) {
                                if (ipv6_addr_cmp(&np->rcv_saddr, daddr))
                                        continue;
                                score++;
                        }
                        if (!ipv6_addr_any(&np->daddr)) {
                                if (ipv6_addr_cmp(&np->daddr, saddr))
                                        continue;
                                score++;
                        }
                        if (sk->bound_dev_if) {
                                if (sk->bound_dev_if != dif)
                                        continue;
                                score++;
                        }
                        if (score == 4) {
                                result = sk;
                                break;
                        } else if (score > badness) {
                                result = sk;
                                badness = score;
                        }
                }
        }
        if (result)
                sock_hold(result);
        read_unlock(&udp_hash_lock);
        return result;
}
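/*
 * Illustrative note, not part of the original file: udp_v6_lookup() is a
 * best-match search.  A socket on the chain earns one point for every
 * specific field it matches (remote port, bound local address, remote
 * address, bound device); a socket specific in all four wins outright,
 * otherwise the highest-scoring candidate seen so far is returned.  The
 * result is returned with a reference held, so callers pair it with
 * sock_put().
 */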
int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct ipv6_pinfo       *np = &sk->net_pinfo.af_inet6;
        struct in6_addr         *daddr;
        struct in6_addr         saddr;
        struct dst_entry        *dst;
        struct flowi            fl;
        struct ip6_flowlabel    *flowlabel = NULL;
        int                     addr_type;
        int                     err;

        if (usin->sin6_family == AF_INET) {
                err = udp_connect(sk, uaddr, addr_len);
                goto ipv4_connected;
        }

        if (addr_len < sizeof(*usin))
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        fl.fl6_flowlabel = 0;
        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                }
        }

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type == IPV6_ADDR_ANY) {
                /*
                 *	connect to self
                 */
                usin->sin6_addr.s6_addr[15] = 0x01;
        }

        daddr = &usin->sin6_addr;

        if (addr_type == IPV6_ADDR_MAPPED) {
                struct sockaddr_in sin;

                sin.sin_family = AF_INET;
                sin.sin_addr.s_addr = daddr->s6_addr32[3];
                sin.sin_port = usin->sin6_port;

                err = udp_connect(sk, (struct sockaddr *) &sin, sizeof(sin));

ipv4_connected:
                if (err < 0)
                        return err;

                ipv6_addr_set(&np->daddr, 0, 0,
                              __constant_htonl(0x0000ffff),
                              sk->daddr);

                if (ipv6_addr_any(&np->saddr)) {
                        ipv6_addr_set(&np->saddr, 0, 0,
                                      __constant_htonl(0x0000ffff),
                                      sk->saddr);
                }

                if (ipv6_addr_any(&np->rcv_saddr)) {
                        ipv6_addr_set(&np->rcv_saddr, 0, 0,
                                      __constant_htonl(0x0000ffff),
                                      sk->rcv_saddr);
                }
                return 0;
        }

        ipv6_addr_copy(&np->daddr, daddr);
        np->flow_label = fl.fl6_flowlabel;

        sk->dport = usin->sin6_port;

        /*
         *	Check for a route to destination and obtain the
         *	destination cache for it.
         */

        fl.proto = IPPROTO_UDP;
        fl.fl6_dst = &np->daddr;
        fl.oif = sk->bound_dev_if;
        fl.uli_u.ports.dport = sk->dport;
        fl.uli_u.ports.sport = sk->sport;

        if (flowlabel) {
                if (flowlabel->opt && flowlabel->opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
                        fl.fl6_dst = rt0->addr;
                }
        } else if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
                fl.fl6_dst = rt0->addr;
        }

        dst = ip6_route_output(sk, &fl);

        if ((err = dst->error) != 0) {
                dst_release(dst);
                fl6_sock_release(flowlabel);
                return err;
        }

        ip6_dst_store(sk, dst, fl.fl6_dst);

        /* get the source address used in the appropriate device */

        err = ipv6_get_saddr(dst, daddr, &saddr);

        if (err == 0) {
                if (ipv6_addr_any(&np->saddr))
                        ipv6_addr_copy(&np->saddr, &saddr);

                if (ipv6_addr_any(&np->rcv_saddr)) {
                        ipv6_addr_copy(&np->rcv_saddr, &saddr);
                        sk->rcv_saddr = 0xffffffff;
                }
                sk->state = TCP_ESTABLISHED;
        }
        fl6_sock_release(flowlabel);

        return err;
}
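/*
 * Illustrative sketch, not part of the original file: the layout of the
 * V4-mapped destination (::ffff:a.b.c.d) produced by the ipv6_addr_set()
 * calls above once udp_connect() has filled in the IPv4 side.  The
 * helper and parameter names are ours.
 */
static __inline__ void udp_v6_example_map_v4(struct in6_addr *a, __u32 v4addr)
{
        /* 0:0:0:0:0:ffff:<IPv4 address>, all words in network byte order */
        ipv6_addr_set(a, 0, 0, __constant_htonl(0x0000ffff), v4addr);
}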
static void udpv6_close(struct sock *sk, long timeout)
{
        inet_sock_release(sk);
}

#ifndef HAVE_CSUM_COPY_USER
#undef CONFIG_UDP_DELAY_CSUM
#endif

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
                  int noblock, int flags, int *addr_len)
{
        struct sk_buff *skb;
        int copied, err;

        if (addr_len)
                *addr_len = sizeof(struct sockaddr_in6);

        if (flags & MSG_ERRQUEUE)
                return ipv6_recv_error(sk, msg, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        copied = skb->len - sizeof(struct udphdr);
        if (copied > len) {
                copied = len;
                msg->msg_flags |= MSG_TRUNC;
        }

#ifndef CONFIG_UDP_DELAY_CSUM
        err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                      msg->msg_iov, copied);
#else
        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
                                              copied);
        } else if (copied > msg->msg_iov[0].iov_len || (msg->msg_flags&MSG_TRUNC)) {
                if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
                        if (flags&MSG_PEEK) {
                                int clear = 0;
                                spin_lock_irq(&sk->receive_queue.lock);
                                if (skb == skb_peek(&sk->receive_queue)) {
                                        __skb_unlink(skb, &sk->receive_queue);
                                        clear = 1;
                                }
                                spin_unlock_irq(&sk->receive_queue.lock);
                                if (clear)
                                        kfree_skb(skb);
                        }

                        /* Error for blocking case is chosen to masquerade
                           as some normal condition.
                         */
                        err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
                        udp_stats_in6.UdpInErrors++;
                        goto out_free;
                }
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
                                              copied);
        } else {
                unsigned int csum = csum_partial(skb->h.raw, sizeof(struct udphdr), skb->csum);

                err = 0;
                csum = csum_and_copy_to_user((char*)&skb->h.uh[1], msg->msg_iov[0].iov_base, copied, csum, &err);
                if (err)
                        goto out_free;
                if ((unsigned short)csum_fold(csum)) {
                        /* Error for blocking case is chosen to masquerade
                           as some normal condition.
                         */
                        err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
                        udp_stats_in6.UdpInErrors++;
                        goto out_free;
                }
        }
#endif
        if (err)
                goto out_free;

        sk->stamp = skb->stamp;

        /* Copy the address. */
        if (msg->msg_name) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *) msg->msg_name;
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = skb->h.uh->source;
                sin6->sin6_flowinfo = 0;

                if (skb->protocol == __constant_htons(ETH_P_IP)) {
                        ipv6_addr_set(&sin6->sin6_addr, 0, 0,
                                      __constant_htonl(0xffff), skb->nh.iph->saddr);
                        if (sk->protinfo.af_inet.cmsg_flags)
                                ip_cmsg_recv(msg, skb);
                } else {
                        memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
                               sizeof(struct in6_addr));

                        if (sk->net_pinfo.af_inet6.rxopt.all)
                                datagram_recv_ctl(sk, msg, skb);
                }
        }

        err = copied;

out_free:
        skb_free_datagram(sk, skb);
out:
        return err;
}
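/*
 * Illustrative sketch, not part of the original file: the user-space
 * view of the receive path above, with minimal error handling and
 * placeholder names (fd, buf, n):
 *
 *	struct sockaddr_in6 from;
 *	char buf[2048];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	int n;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_name = &from;
 *	msg.msg_namelen = sizeof(from);
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	n = recvmsg(fd, &msg, 0);
 *
 * On return, from.sin6_addr and from.sin6_port identify the sender; a
 * datagram that arrived over IPv4 shows up as the ::ffff:a.b.c.d form
 * built above, and MSG_TRUNC in msg.msg_flags means the datagram was
 * larger than the buffer.
 */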
void udpv6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
               struct inet6_skb_parm *opt,
               int type, int code, unsigned char *buff, __u32 info)
{
        struct net_device *dev = skb->dev;
        struct in6_addr *saddr = &hdr->saddr;
        struct in6_addr *daddr = &hdr->daddr;
        struct sock *sk;
        struct udphdr *uh;
        int err;

        if (buff + sizeof(struct udphdr) > skb->tail)
                return;

        uh = (struct udphdr *) buff;

        sk = udp_v6_lookup(daddr, uh->dest, saddr, uh->source, dev->ifindex);

        if (sk == NULL)
                return;

        if (!icmpv6_err_convert(type, code, &err) &&
            !sk->net_pinfo.af_inet6.recverr)
                goto out;

        if (sk->bsdism && sk->state != TCP_ESTABLISHED &&
            !sk->net_pinfo.af_inet6.recverr)
                goto out;

        if (sk->net_pinfo.af_inet6.recverr)
                ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));

        sk->err = err;
        sk->error_report(sk);
out:
        sock_put(sk);
}
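/*
 * Illustrative sketch, not part of the original file: the per-socket
 * error queue fed above through ipv6_icmp_error() is consumed from user
 * space roughly like this (fd and msg are placeholders):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *
 * The control data then carries a cmsg of level SOL_IPV6 and type
 * IPV6_RECVERR holding a struct sock_extended_err that describes the
 * ICMPv6 error, which is exactly what udpv6_recvmsg() hands back for
 * MSG_ERRQUEUE.
 */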
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
#if defined(CONFIG_FILTER) && defined(CONFIG_UDP_DELAY_CSUM)
        if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
                if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum))) {
                        udp_stats_in6.UdpInErrors++;
                        ipv6_statistics.Ip6InDiscards++;
                        kfree_skb(skb);
                        return 0;
                }
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
#endif
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                udp_stats_in6.UdpInErrors++;
                ipv6_statistics.Ip6InDiscards++;
                kfree_skb(skb);
                return 0;
        }
        ipv6_statistics.Ip6InDelivers++;
        udp_stats_in6.UdpInDatagrams++;
        return 0;
}
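/*
 * Illustrative sketch, not part of the original file: with
 * CONFIG_UDP_DELAY_CSUM the receive path defers checksum verification
 * until the datagram is consumed.  udpv6_rcv() folds the pseudo-header
 * into skb->csum, so the test applied above (and again in
 * udpv6_recvmsg) reduces to this helper; its name is ours.
 */
static __inline__ int udp_v6_example_csum_ok(struct sk_buff *skb)
{
        return (unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len,
                                                      skb->csum)) == 0;
}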
static struct sock *udp_v6_mcast_next(struct sock *sk,
                                      u16 loc_port, struct in6_addr *loc_addr,
                                      u16 rmt_port, struct in6_addr *rmt_addr,
                                      int dif)
{
        struct sock *s = sk;
        unsigned short num = ntohs(loc_port);
        for (; s; s = s->next) {
                if (s->num == num) {
                        struct ipv6_pinfo *np = &s->net_pinfo.af_inet6;
                        if (s->dport) {
                                if (s->dport != rmt_port)
                                        continue;
                        }
                        if (!ipv6_addr_any(&np->daddr) &&
                            ipv6_addr_cmp(&np->daddr, rmt_addr))
                                continue;

                        if (s->bound_dev_if && s->bound_dev_if != dif)
                                continue;

                        if (!ipv6_addr_any(&np->rcv_saddr)) {
                                if (ipv6_addr_cmp(&np->rcv_saddr, loc_addr) == 0)
                                        return s;
                        }
                        if (!inet6_mc_check(s, loc_addr))
                                continue;
                        return s;
                }
        }
        return NULL;
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static void udpv6_mcast_deliver(struct udphdr *uh,
                                struct in6_addr *saddr, struct in6_addr *daddr,
                                struct sk_buff *skb)
{
        struct sock *sk, *sk2;
        struct sk_buff *buff;
        int dif;

        read_lock(&udp_hash_lock);
        sk = udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)];
        dif = skb->dev->ifindex;
        sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
        if (!sk)
                goto free_skb;

        buff = NULL;
        sk2 = sk;
        while ((sk2 = udp_v6_mcast_next(sk2->next, uh->dest, daddr,
                                        uh->source, saddr, dif))) {
                if (!buff) {
                        buff = skb_clone(skb, GFP_ATOMIC);
                        if (!buff)
                                continue;
                }
                if (sock_queue_rcv_skb(sk2, buff) >= 0)
                        buff = NULL;
        }
        if (buff)
                kfree_skb(buff);
        if (sock_queue_rcv_skb(sk, skb) < 0) {
free_skb:
                kfree_skb(skb);
        }
        read_unlock(&udp_hash_lock);
}
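/*
 * Illustrative sketch, not part of the original file: a socket is only
 * visited by the delivery loop above if inet6_mc_check() finds the group
 * on it, i.e. if user space joined the group, roughly:
 *
 *	struct ipv6_mreq mreq;
 *
 *	inet_pton(AF_INET6, "ff15::1", &mreq.ipv6mr_multiaddr);
 *	mreq.ipv6mr_interface = if_nametoindex("eth0");
 *	setsockopt(fd, SOL_IPV6, IPV6_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * The group address, interface name and fd are placeholders.
 */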
int udpv6_rcv(struct sk_buff *skb, unsigned long len)
{
        struct sock *sk;
        struct udphdr *uh;
        struct net_device *dev = skb->dev;
        struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
        struct in6_addr *daddr = &skb->nh.ipv6h->daddr;
        u32 ulen;

        uh = skb->h.uh;
        __skb_pull(skb, skb->h.raw - skb->data);

        ulen = ntohs(uh->len);

        /* Check for jumbo payload */
        if (ulen == 0 && skb->nh.ipv6h->payload_len == 0)
                ulen = len;

        if (ulen > len || len < sizeof(*uh)) {
                printk(KERN_DEBUG "UDP: short packet: %d/%ld\n", ulen, len);
                udp_stats_in6.UdpInErrors++;
                kfree_skb(skb);
                return 0;
        }

        if (uh->check == 0) {
                /* IPv6 draft-v2 section 8.1 says that we SHOULD log
                   this error. Well, it is reasonable.
                 */
                printk(KERN_INFO "IPv6: udp checksum is 0\n");
                goto discard;
        }

        skb_trim(skb, ulen);

#ifndef CONFIG_UDP_DELAY_CSUM
        switch (skb->ip_summed) {
        case CHECKSUM_NONE:
                skb->csum = csum_partial((char*)uh, ulen, 0);
        case CHECKSUM_HW:
                if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
                        printk(KERN_DEBUG "IPv6: udp checksum error\n");
                        goto discard;
                }
        };
#else
        if (skb->ip_summed == CHECKSUM_HW) {
                if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
                        goto discard;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
#endif

        len = ulen;

        /*
         *	Multicast receive code
         */
        if (ipv6_addr_type(daddr) & IPV6_ADDR_MULTICAST) {
                udpv6_mcast_deliver(uh, saddr, daddr, skb);
                return 0;
        }

        /* Unicast */

        /*
         * check socket cache ... must talk to Alan about his plans
         * for sock caches... i'll skip this for now.
         */
        sk = udp_v6_lookup(saddr, uh->source, daddr, uh->dest, dev->ifindex);

        if (sk == NULL) {
#ifdef CONFIG_UDP_DELAY_CSUM
                if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
                    (unsigned short)csum_fold(csum_partial((char*)uh, len, skb->csum)))
                        goto discard;
#endif
                udp_stats_in6.UdpNoPorts++;

                icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);

                kfree_skb(skb);
                return 0;
        }

        if (0/*sk->user_callback &&
            sk->user_callback(sk->user_data, skb) == 0*/) {
                udp_stats_in6.UdpInDatagrams++;
                kfree_skb(skb);
                sock_put(sk);
                return 0;
        }

        /* deliver */
        udpv6_queue_rcv_skb(sk, skb);
        sock_put(sk);
        return 0;

discard:
        udp_stats_in6.UdpInErrors++;
        kfree_skb(skb);
        return 0;
}
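/*
 * Illustrative sketch, not part of the original file: the complete check
 * performed above in one expression.  A datagram is acceptable when the
 * one's-complement sum over its payload plus the IPv6 pseudo-header
 * (source, destination, length, next header) folds to zero.  The helper
 * name is ours.
 */
static __inline__ int udp_v6_example_check(struct udphdr *uh, u32 ulen,
                                           struct in6_addr *saddr,
                                           struct in6_addr *daddr)
{
        return csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP,
                               csum_partial((char *)uh, ulen, 0)) == 0;
}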
struct udpv6fakehdr
{
        struct udphdr	uh;
        struct iovec	*iov;
        __u32		wcheck;
        __u32		pl_len;
        struct in6_addr *daddr;
};

static int udpv6_getfrag(const void *data, struct in6_addr *addr,
                         char *buff, unsigned int offset, unsigned int len)
{
        struct udpv6fakehdr *udh = (struct udpv6fakehdr *) data;
        char *dst;
        int final = 0;
        int clen = len;

        dst = buff;

        if (offset) {
                offset -= sizeof(struct udphdr);
        } else {
                dst += sizeof(struct udphdr);
                final = 1;
                clen -= sizeof(struct udphdr);
        }

        if (csum_partial_copy_fromiovecend(dst, udh->iov, offset,
                                           clen, &udh->wcheck))
                return -EFAULT;

        if (final) {
                struct in6_addr *daddr;

                udh->wcheck = csum_partial((char *)udh, sizeof(struct udphdr),
                                           udh->wcheck);

                daddr = udh->daddr;
                if (daddr == NULL) {
                        /*
                         *	use packet destination address
                         *	this should improve cache locality
                         */
                        daddr = addr + 1;
                }
                udh->uh.check = csum_ipv6_magic(addr, daddr,
                                                udh->pl_len, IPPROTO_UDP,
                                                udh->wcheck);
                if (udh->uh.check == 0)
                        udh->uh.check = -1;

                memcpy(buff, udh, sizeof(struct udphdr));
        }
        return 0;
}
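/*
 * Illustrative note, not part of the original file: the final fixup
 * above implements the UDP rule that a checksum computing to zero is
 * transmitted as all ones, because zero on the wire means "no checksum",
 * which IPv6 forbids (compare the log message in udpv6_rcv()).  In
 * isolation, with a helper name of ours:
 */
static __inline__ __u16 udp_v6_example_fix_check(__u16 check)
{
        return check == 0 ? 0xffff : check;
}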
static int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, int ulen)
{
        struct ipv6_txoptions opt_space;
        struct udpv6fakehdr udh;
        struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
        struct ipv6_txoptions *opt = NULL;
        struct ip6_flowlabel *flowlabel = NULL;
        struct flowi fl;
        int addr_len = msg->msg_namelen;
        struct in6_addr *daddr;
        int len = ulen + sizeof(struct udphdr);
        int addr_type;
        int hlimit = -1;
        int err;

        /* Rough check on arithmetic overflow,
           better check is made in ip6_build_xmit
         */
        if (ulen < 0 || ulen > INT_MAX - sizeof(struct udphdr))
                return -EMSGSIZE;

        fl.fl6_flowlabel = 0;

        if (sin6) {
                if (sin6->sin6_family == AF_INET)
                        return udp_sendmsg(sk, msg, ulen);

                if (addr_len < sizeof(*sin6))
                        return -EINVAL;

                if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
                        return -EINVAL;

                if (sin6->sin6_port == 0)
                        return -EINVAL;

                udh.uh.dest = sin6->sin6_port;
                daddr = &sin6->sin6_addr;

                if (np->sndflow) {
                        fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                        if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                                if (flowlabel == NULL)
                                        return -EINVAL;
                                daddr = &flowlabel->dst;
                        }
                }

                /* Otherwise it will be difficult to maintain sk->dst_cache. */
                if (sk->state == TCP_ESTABLISHED &&
                    !ipv6_addr_cmp(daddr, &sk->net_pinfo.af_inet6.daddr))
                        daddr = &sk->net_pinfo.af_inet6.daddr;
        } else {
                if (sk->state != TCP_ESTABLISHED)
                        return -ENOTCONN;

                udh.uh.dest = sk->dport;
                daddr = &sk->net_pinfo.af_inet6.daddr;
                fl.fl6_flowlabel = np->flow_label;
        }

        addr_type = ipv6_addr_type(daddr);

        if (addr_type == IPV6_ADDR_MAPPED) {
                struct sockaddr_in sin;

                sin.sin_family = AF_INET;
                sin.sin_addr.s_addr = daddr->s6_addr32[3];
                sin.sin_port = udh.uh.dest;
                msg->msg_name = (struct sockaddr *)(&sin);
                msg->msg_namelen = sizeof(sin);
                fl6_sock_release(flowlabel);

                return udp_sendmsg(sk, msg, ulen);
        }

        udh.daddr = NULL;
        fl.oif = sk->bound_dev_if;
        fl.fl6_src = NULL;

        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));

                err = datagram_send_ctl(msg, &fl, opt, &hlimit);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
                }
                if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                }
                if (!(opt->opt_nflen|opt->opt_flen))
                        opt = NULL;
        }
        if (opt == NULL)
                opt = np->opt;
        if (flowlabel)
                opt = fl6_merge_options(&opt_space, flowlabel, opt);
        if (opt && opt->srcrt)
                udh.daddr = daddr;

        udh.uh.source = sk->sport;
        udh.uh.len = len < 0x10000 ? htons(len) : 0;
        udh.uh.check = 0;
        udh.iov = msg->msg_iov;
        udh.wcheck = 0;
        udh.pl_len = len;

        fl.proto = IPPROTO_UDP;
        fl.fl6_dst = daddr;
        fl.uli_u.ports.dport = udh.uh.dest;
        fl.uli_u.ports.sport = udh.uh.source;

        err = ip6_build_xmit(sk, udpv6_getfrag, &udh, &fl, len, opt, hlimit,
                             msg->msg_flags);

        fl6_sock_release(flowlabel);

        if (err < 0)
                return err;

        udp_stats_in6.UdpOutDatagrams++;
        return ulen;
}
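/*
 * Illustrative sketch, not part of the original file: the user-space
 * view of the send path above.  The address, port, buffer and fd are
 * placeholders:
 *
 *	struct sockaddr_in6 to;
 *
 *	memset(&to, 0, sizeof(to));
 *	to.sin6_family = AF_INET6;
 *	to.sin6_port = htons(9999);
 *	inet_pton(AF_INET6, "2001:db8::1", &to.sin6_addr);
 *	sendto(fd, buf, buflen, 0, (struct sockaddr *)&to, sizeof(to));
 *
 * A destination of the form ::ffff:a.b.c.d takes the IPV6_ADDR_MAPPED
 * branch above and is handed to the IPv4 udp_sendmsg(); ancillary data
 * supplied in msg_control is parsed by datagram_send_ctl() and can
 * select the outgoing interface, hop limit, source address and routing
 * header.
 */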
static struct inet6_protocol udpv6_protocol =
{
        udpv6_rcv,		/* UDP handler		*/
        udpv6_err,		/* UDP error control	*/
        NULL,			/* next			*/
        IPPROTO_UDP,		/* protocol ID		*/
        0,			/* copy			*/
        NULL,			/* data			*/
        "UDPv6"			/* name			*/
};

#define LINE_LEN 190
#define LINE_FMT "%-190s\n"

static void get_udp6_sock(struct sock *sp, char *tmpbuf, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;

        dest  = &sp->net_pinfo.af_inet6.daddr;
        src   = &sp->net_pinfo.af_inet6.rcv_saddr;
        destp = ntohs(sp->dport);
        srcp  = ntohs(sp->sport);
        timer_active  = (sp->timer.prev != NULL) ? 2 : 0;
        timer_expires = (timer_active == 2 ? sp->timer.expires : jiffies);
        sprintf(tmpbuf,
                "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                "%02X %08X:%08X %02X:%08lX %08X %5d %8d %ld %d %p",
                i,
                src->s6_addr32[0], src->s6_addr32[1],
                src->s6_addr32[2], src->s6_addr32[3], srcp,
                dest->s6_addr32[0], dest->s6_addr32[1],
                dest->s6_addr32[2], dest->s6_addr32[3], destp,
                sp->state,
                atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc),
                timer_active, timer_expires-jiffies, 0,
                sp->socket->inode->i_uid, 0,
                sp->socket ? sp->socket->inode->i_ino : 0,
                atomic_read(&sp->refcnt), sp);
}

int udp6_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
        int len = 0, num = 0, i;
        off_t pos = 0;
        off_t begin;
        char tmpbuf[LINE_LEN+2];

        if (offset < LINE_LEN+1)
                len += sprintf(buffer, LINE_FMT,
                               "  sl  "						/* 6 */
                               "local_address                         "	/* 38 */
                               "remote_address                        "	/* 38 */
                               "st tx_queue rx_queue tr tm->when retrnsmt"	/* 41 */
                               "   uid  timeout inode");			/* 21 */
        pos = LINE_LEN+1;

        read_lock(&udp_hash_lock);
        for (i = 0; i < UDP_HTABLE_SIZE; i++) {
                struct sock *sk;

                for (sk = udp_hash[i]; sk; sk = sk->next, num++) {
                        if (sk->family != PF_INET6)
                                continue;
                        pos += LINE_LEN+1;
                        if (pos <= offset)
                                continue;
                        get_udp6_sock(sk, tmpbuf, i);
                        len += sprintf(buffer+len, LINE_FMT, tmpbuf);
                        if (len >= length)
                                goto out;
                }
        }
out:
        read_unlock(&udp_hash_lock);
        begin = len - (pos - offset);
        *start = buffer + begin;
        len -= begin;
        if (len > length)
                len = length;
        if (len < 0)
                len = 0;
        return len;
}

struct proto udpv6_prot = {
        udpv6_close,			/* close */
        udpv6_connect,			/* connect */
        udp_disconnect,			/* disconnect */
        NULL,				/* accept */
        NULL,				/* retransmit */
        NULL,				/* write_wakeup */
        NULL,				/* read_wakeup */
        datagram_poll,			/* poll */
        udp_ioctl,			/* ioctl */
        NULL,				/* init */
        inet6_destroy_sock,		/* destroy */
        NULL,				/* shutdown */
        ipv6_setsockopt,		/* setsockopt */
        ipv6_getsockopt,		/* getsockopt */
        udpv6_sendmsg,			/* sendmsg */
        udpv6_recvmsg,			/* recvmsg */
        NULL,				/* bind */
        udpv6_queue_rcv_skb,		/* backlog_rcv */
        udp_v6_hash,			/* hash */
        udp_v6_unhash,			/* unhash */
        udp_v6_get_port,		/* get_port */
        128,				/* max_header */
        0,				/* retransmits */
        "UDP",				/* name */
        0,				/* inuse */
        0				/* highestinuse */
};

void __init udpv6_init(void)
{
        inet6_add_protocol(&udpv6_protocol);
}