/*
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"
/* Socket used for sending RSTs and ACKs */
static struct socket *dccp_v6_ctl_socket;

static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
                                   struct request_sock *req);
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);

static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
                                 inet6_csk_bind_conflict);
}
static void dccp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != DCCP_CLOSED) {
                if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
                        dccp_hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(&dccp_hashinfo, sk);
                local_bh_enable();
        }
}
static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
                                struct in6_addr *saddr,
                                struct in6_addr *daddr,
                                unsigned long base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
}
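
/*
 * Note on the checksum helper above: dccp_v6_check() is the usual IPv6
 * pseudo-header fold. csum_ipv6_magic() mixes saddr, daddr, the DCCP
 * length and IPPROTO_DCCP into 'base', which callers pass as the partial
 * sum over the DCCP header and payload (see the csum_partial() calls in
 * dccp_v6_send_response() and dccp_v6_send_check() below).
 */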
static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);

        if (skb->protocol == htons(ETH_P_IPV6))
                return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
                                                    skb->nh.ipv6h->saddr.s6_addr32,
                                                    dh->dccph_dport,
                                                    dh->dccph_sport);

        return secure_dccp_sequence_number(skb->nh.iph->daddr,
                                           skb->nh.iph->saddr,
                                           dh->dccph_dport,
                                           dh->dccph_sport);
}
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }
        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;
        security_sk_classify_flow(sk, &fl);

        if (np->opt != NULL && np->opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = xfrm_lookup(&dst, &fl, sk, 0);
        if (err < 0)
                goto failure;

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt != NULL)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        inet->dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;

        dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
                                                       np->daddr.s6_addr32,
                                                       inet->sport,
                                                       inet->dport);
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}
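
/*
 * Summary of the connect path above: the destination is validated (flow
 * label, multicast, link-local scope), a connect to an IPv4-mapped address
 * is handed off to dccp_v4_connect() with icsk_af_ops switched to
 * dccp_ipv6_mapped, and for native IPv6 the flow is routed via
 * ip6_dst_lookup()/xfrm_lookup() before the socket is hashed with
 * inet6_hash_connect() and the initial sequence numbers are chosen.
 */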
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        int type, int code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;

        sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
                          &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);
                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_DCCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_sk_classify_flow(sk, &fl);

                        err = ip6_dst_lookup(sk, &dst, &fl);
                        if (err) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        err = xfrm_lookup(&dst, &fl, sk, 0);
                        if (err < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }
                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        dccp_sync_mss(sk, dst_mtu(dst));
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        seq = DCCP_SKB_CB(skb)->dccpd_seq;
        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case DCCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
                                           &hdr->daddr, &hdr->saddr,
                                           inet6_iif(skb));
                if (req == NULL)
                        goto out;

                /*
                 * ICMPs are not backlogged, hence we cannot get an established
                 * socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != dccp_rsk(req)->dreq_iss) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);
                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
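
/*
 * The ICMPv6 error handler above mirrors the TCP one: ICMPV6_PKT_TOOBIG
 * triggers a route re-lookup and dccp_sync_mss() when the cached PMTU
 * shrank, errors for a pending request_sock drop that request, and errors
 * in DCCP_REQUESTING/DCCP_RESPOND abort the connection attempt. Everything
 * else is reported through sk_err or sk_err_soft depending on np->recverr
 * and on whether the socket is currently owned by user context.
 */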
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                                 struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p = NULL, final;
        struct flowi fl;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = ireq6->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;
        security_req_classify_flow(req, &fl);

        opt = np->opt;

        if (opt == NULL &&
            np->rxopt.bits.osrcrt == 2 &&
            ireq6->pktopts) {
                struct sk_buff *pktopts = ireq6->pktopts;
                struct inet6_skb_parm *rxopt = IP6CB(pktopts);

                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk,
                                (struct ipv6_rt_hdr *)(pktopts->nh.raw +
                                                       rxopt->srcrt));
        }

        if (opt != NULL && opt->srcrt != NULL) {
                const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;

        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        err = xfrm_lookup(&dst, &fl, sk, 0);
        if (err < 0)
                goto done;

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);

                dh->dccph_checksum = dccp_v6_check(dh, skb->len,
                                                   &ireq6->loc_addr,
                                                   &ireq6->rmt_addr,
                                                   csum_partial((char *)dh,
                                                                skb->len,
                                                                skb->csum));
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

done:
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}
static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        if (inet6_rsk(req)->pktopts != NULL)
                kfree_skb(inet6_rsk(req)->pktopts);
}
static struct request_sock_ops dccp6_request_sock_ops = {
        .family         = PF_INET6,
        .obj_size       = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack    = dccp_v6_send_response,
        .send_ack       = dccp_v6_reqsk_send_ack,
        .destructor     = dccp_v6_reqsk_destructor,
        .send_reset     = dccp_v6_ctl_send_reset,
};
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct dccp6_timewait_sock),
};
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
                                             len, IPPROTO_DCCP,
                                             csum_partial((char *)dh,
                                                          dh->dccph_doff * 4,
                                                          skb->csum));
}
static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb;
        struct flowi fl;
        u64 seqno = 0;

        if (rxdh->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
                        GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

        skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_hdr_reset_len);

        /* Swap the send and the receive. */
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;
        dccp_hdr_reset(skb)->dccph_reset_code =
                                DCCP_SKB_CB(rxskb)->dccpd_reset_code;

        /* See "8.3.1. Abnormal Termination" in RFC 4340 */
        if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

        dccp_hdr_set_seq(dh, seqno);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
                         DCCP_SKB_CB(rxskb)->dccpd_seq);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
        dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                             sizeof(*dh), IPPROTO_DCCP,
                                             skb->csum);
        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dh->dccph_dport;
        fl.fl_ip_sport = dh->dccph_sport;
        security_skb_classify_flow(rxskb, &fl);

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
                        return;
                }
        }

        kfree_skb(skb);
}
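
/*
 * The reset path above is stateless, per "8.3.1. Abnormal Termination" in
 * RFC 4340: never answer a Reset with a Reset, swap the received ports,
 * use the received Acknowledgement Number + 1 as the outgoing Sequence
 * Number (or zero when the packet carried no Ack), and acknowledge the
 * received Sequence Number. The packet is sent from dccp_v6_ctl_socket
 * since no connection state exists.
 */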
static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
                                   struct request_sock *req)
{
        struct flowi fl;
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
        const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_ack_bits);
        struct sk_buff *skb;

        skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
                        GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

        skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_hdr_ack_len);

        /* Build DCCP header and checksum it. */
        dh->dccph_type  = DCCP_PKT_ACK;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_ack_len / 4;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
                         DCCP_SKB_CB(rxskb)->dccpd_seq);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

        /* FIXME: calculate checksum, IPv4 also should... */

        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dh->dccph_dport;
        fl.fl_ip_sport = dh->dccph_sport;
        security_req_classify_flow(req, &fl);

        if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        return;
                }
        }

        kfree_skb(skb);
}
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = skb->nh.ipv6h;
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet6_csk_search_req(sk, &prev,
                                                        dh->dccph_sport,
                                                        &iph->saddr,
                                                        &iph->daddr,
                                                        inet6_iif(skb));
        if (req != NULL)
                return dccp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(&dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
                                         inet6_iif(skb));
        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

        return sk;
}
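
/*
 * dccp_v6_hnd_req() can hand back three things: a socket derived from a
 * matching request_sock (via dccp_check_req()), an already established
 * socket found in the hash tables (TIME_WAIT entries are dropped), or the
 * listening socket itself when nothing matches, in which case the caller
 * continues processing on the listener.
 */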
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_sock dp;
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (dccp_bad_service_code(sk, service)) {
                reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;

        /* FIXME: process options */

        dccp_openreq_init(req, &dp, skb);

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        ireq6 = inet6_rsk(req);
        ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
        req->rcv_wnd    = dccp_feat_default_sequence_window;
        ireq6->pktopts  = NULL;

        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
                ireq6->pktopts = skb;
        }
        ireq6->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * In fact we defer setting S.GSR, S.SWL, S.SWH to
         * dccp_create_openreq_child.
         */
        dreq = dccp_rsk(req);
        dreq->dreq_isr     = dcb->dccpd_seq;
        dreq->dreq_iss     = dccp_v6_init_sequence(sk, skb);
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req, NULL))
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
        dcb->dccpd_reset_code = reset_code;
        return -1;
}
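
/*
 * Request handling above follows "Step 3: Process LISTEN state" of
 * RFC 4340: a request_sock is allocated, S.ISR is taken from the incoming
 * REQUEST and S.ISS from dccp_v6_init_sequence(), a RESPONSE is sent with
 * dccp_v6_send_response(), and the request is parked on the listener's
 * queue with inet6_csk_reqsk_queue_hash_add() until the client's
 * confirming packet is matched in dccp_v6_hnd_req()/dccp_check_req().
 */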
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions = NULL;
                newnp->opt        = NULL;
                newnp->mcast_oif  = inet6_iif(skb);
                newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
                const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);

                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk,
                                (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
                                                       rxopt->srcrt));
        }

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_DCCP;
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                if (opt != NULL && opt->srcrt != NULL) {
                        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;
                security_sk_classify_flow(sk, &fl);

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        __ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
        newsk->sk_bound_dev_if = ireq6->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (ireq6->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
                kfree_skb(ireq6->pktopts);
                ireq6->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /*
         * Clone native IPv6 options from listening socket (if any)
         *
         * Yes, keeping reference count would be much more clever, but we do
         * one more thing here: reattach optmem to newsk.
         */
        if (opt != NULL) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt != NULL)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(&dccp_hashinfo, newsk);
        inet_inherit_port(&dccp_hashinfo, sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        if (opt != NULL && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}
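
/*
 * Note on the child-socket creation above: when the triggering packet was
 * IPv4 (an IPv4-mapped peer), the work is delegated to
 * dccp_v4_request_recv_sock() and the new socket is then dressed up as an
 * IPv6 one - mapped addresses are synthesized with the ::ffff:0:0/96
 * prefix and icsk_af_ops is pointed at dccp_ipv6_mapped. The native IPv6
 * branch copies addresses from the request_sock, clones pktoptions and
 * IPv6 options, and inherits the listener's port.
 */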
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
         */
        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv is currently
         * called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
                if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb) {
                        /* This is where we would goto ipv6_pktoptions. */
                        __kfree_skb(opt_skb);
                }
                return 0;
        }

        if (sk->sk_state == DCCP_LISTEN) {
                struct sock *nsk = dccp_v6_hnd_req(sk, skb);

                if (nsk == NULL)
                        goto discard;
                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket.
                 */
                if (nsk != sk) {
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb != NULL)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
                goto reset;
        if (opt_skb) {
                /* This is where we would goto ipv6_pktoptions. */
                __kfree_skb(opt_skb);
        }
        return 0;

reset:
        dccp_v6_ctl_send_reset(skb);
discard:
        if (opt_skb != NULL)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
}
static int dccp_v6_rcv(struct sk_buff **pskb)
{
        const struct dccp_hdr *dh;
        struct sk_buff *skb = *pskb;
        struct sock *sk;

        /* Step 1: Check header basics: */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

        /* Step 2:
         *      Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
                            dh->dccph_sport,
                            &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
         *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk == NULL)
                goto no_dccp_socket;

        /*
         * Step 2:
         *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        return sk_receive_skb(sk, skb) ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(skb);
        }
discard_it:
        /*
         *      Discard frame
         */
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        inet_twsk_put(inet_twsk(sk));
        goto no_dccp_socket;
}
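
/*
 * dccp_v6_rcv() implements Step 1 and Step 2 of the RFC 4340 packet
 * sequence: validate the header, look the flow up in the socket hash, and
 * if no socket (or only a TIME_WAIT entry) is found, answer with
 * Reset(No Connection) unless the packet itself was a Reset. Packets for
 * a live socket are passed to sk_receive_skb(), which ends up in
 * dccp_v6_do_rcv() directly or via the backlog.
 */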
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = dccp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/*
 *      DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = dccp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
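
/*
 * Two af_ops tables are provided: dccp_ipv6_af_ops for native IPv6 sockets
 * and dccp_ipv6_mapped for IPv6 sockets that ended up talking IPv4 through
 * a mapped address (see the IPV6_ADDR_MAPPED branches in dccp_v6_connect()
 * and dccp_v6_request_recv_sock()). The mapped table routes transmission
 * and header handling through the IPv4 helpers while keeping the IPv6
 * socket option handlers.
 */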
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        static __u8 dccp_v6_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v6_ctl_sock_initialized))
                        dccp_v6_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
        }

        return err;
}
static int dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_destroy_sock(sk);
        return inet6_destroy_sock(sk);
}
static struct proto dccp_v6_prot = {
        .name              = "DCCPv6",
        .owner             = THIS_MODULE,
        .close             = dccp_close,
        .connect           = dccp_v6_connect,
        .disconnect        = dccp_disconnect,
        .ioctl             = dccp_ioctl,
        .init              = dccp_v6_init_sock,
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
        .hash              = dccp_v6_hash,
        .unhash            = dccp_unhash,
        .accept            = inet_csk_accept,
        .get_port          = dccp_v6_get_port,
        .shutdown          = dccp_shutdown,
        .destroy           = dccp_v6_destroy_sock,
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static struct inet6_protocol dccp_v6_protocol = {
        .handler     = dccp_v6_rcv,
        .err_handler = dccp_v6_err,
        .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
static struct proto_ops inet6_dccp_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = inet6_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
        .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v6_protosw = {
        .type       = SOCK_DCCP,
        .protocol   = IPPROTO_DCCP,
        .prot       = &dccp_v6_prot,
        .ops        = &inet6_dccp_ops,
        .capability = -1,
        .flags      = INET_PROTOSW_ICSK,
};
static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err != 0)
                goto out;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_proto;

        inet6_register_protosw(&dccp_v6_protosw);

        err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
                                       SOCK_DCCP, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_protosw;
out:
        return err;
out_unregister_protosw:
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
        proto_unregister(&dccp_v6_prot);
        goto out;
}
static void __exit dccp_v6_exit(void)
{
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);
/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");