// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
#include <linux/string.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
                                          const struct in6_addr *saddr,
                                          const struct in6_addr *daddr)
{
        return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                             ipv6_hdr(skb)->saddr.s6_addr32,
                                             dccp_hdr(skb)->dccph_dport,
                                             dccp_hdr(skb)->dccph_sport);
}

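/*
 * ICMPv6 error handler: called by icmpv6_notify() when an ICMPv6 error
 * (destination unreachable, packet too big, redirect, ...) refers to a
 * DCCP segment this host sent earlier.
 */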
static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                       u8 type, u8 code, int offset, __be32 info)
{
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh;
        struct dccp_sock *dp;
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;
        struct net *net = dev_net(skb->dev);

        /* Only need dccph_dport & dccph_sport which are the first
         * 4 bytes in dccp header.
         * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
         */
        BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
        BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
        dh = (struct dccp_hdr *)(skb->data + offset);

        sk = __inet6_lookup_established(net, &dccp_hashinfo,
                                        &hdr->daddr, dh->dccph_dport,
                                        &hdr->saddr, ntohs(dh->dccph_sport),
                                        inet6_iif(skb), 0);

        if (!sk) {
                __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
                                  ICMP6_MIB_INERRORS);
                return -ENOENT;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return 0;
        }
        seq = dccp_hdr_seq(dh);
        if (sk->sk_state == DCCP_NEW_SYN_RECV) {
                dccp_req_err(sk, seq);
                return 0;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        dp = dccp_sk(sk);
        if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
            !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == NDISC_REDIRECT) {
                if (!sock_owned_by_user(sk)) {
                        struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

                        if (dst)
                                dst->ops->redirect(dst, sk, skb);
                }
                goto out;
        }

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (!ip6_sk_accept_pmtu(sk))
                        goto out;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                dst = inet6_csk_update_pmtu(sk, ntohl(info));
                if (!dst)
                        goto out;

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
                        dccp_sync_mss(sk, dst_mtu(dst));
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for a request_sock */
        switch (sk->sk_state) {
        case DCCP_REQUESTING:
        case DCCP_RESPOND:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);
                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;
}

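/*
 * Build and send the DCCP-Response for a request_sock created by
 * dccp_v6_conn_request(), using a flow derived from the request.
 */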
static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct in6_addr *final_p, final;
        struct flowi6 fl6;
        int err = -1;
        struct dst_entry *dst;

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_DCCP;
        fl6.daddr = ireq->ir_v6_rmt_addr;
        fl6.saddr = ireq->ir_v6_loc_addr;
        fl6.flowlabel = 0;
        fl6.flowi6_oif = ireq->ir_iif;
        fl6.fl6_dport = ireq->ir_rmt_port;
        fl6.fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(&fl6));

        rcu_read_lock();
        final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
        rcu_read_unlock();

        dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                dst = NULL;
                goto done;
        }

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);
                struct ipv6_txoptions *opt;

                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq->ir_v6_loc_addr,
                                                         &ireq->ir_v6_rmt_addr);
                fl6.daddr = ireq->ir_v6_rmt_addr;
                rcu_read_lock();
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
                err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
                               sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

done:
        dst_release(dst);
        return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
        kfree(inet_rsk(req)->ipv6_opt);
        kfree_skb(inet_rsk(req)->pktopts);
}

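/*
 * Send a DCCP-Reset in reply to @rxskb via the per-net control socket
 * (net->dccp.v6_ctl_sk).
 */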
static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
{
        const struct ipv6hdr *rxip6h;
        struct sk_buff *skb;
        struct flowi6 fl6;
        struct net *net = dev_net(skb_dst(rxskb)->dev);
        struct sock *ctl_sk = net->dccp.v6_ctl_sk;
        struct dst_entry *dst;

        if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        skb = dccp_ctl_make_reset(ctl_sk, rxskb);
        if (skb == NULL)
                return;

        rxip6h = ipv6_hdr(rxskb);
        dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
                                                            &rxip6h->daddr);

        memset(&fl6, 0, sizeof(fl6));
        fl6.daddr = rxip6h->saddr;
        fl6.saddr = rxip6h->daddr;

        fl6.flowi6_proto = IPPROTO_DCCP;
        fl6.flowi6_oif = inet6_iif(rxskb);
        fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
        fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
        security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

        /* sk = NULL, but it is safe for now. RST socket required. */
        dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(skb, dst);
                ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
                DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                return;
        }

        kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
        .family          = PF_INET6,
        .obj_size        = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack     = dccp_v6_send_response,
        .send_ack        = dccp_reqsk_send_ack,
        .destructor      = dccp_v6_reqsk_destructor,
        .send_reset      = dccp_v6_ctl_send_reset,
        .syn_ack_timeout = dccp_syn_ack_timeout,
};

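/*
 * Handle a DCCP-Request arriving on a listening socket: validate the
 * service code, allocate and initialise a request_sock, and answer with
 * a DCCP-Response.
 */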
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                return 0;       /* discard, don't send a reset here */

        if (dccp_bad_service_code(sk, service)) {
                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk))
                goto drop;

        req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
        if (req == NULL)
                goto drop;

        if (dccp_reqsk_init(req, dccp_sk(sk), skb))
                goto drop_and_free;

        dreq = dccp_rsk(req);
        if (dccp_parse_options(sk, dreq, skb))
                goto drop_and_free;

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        ireq = inet_rsk(req);
        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        ireq->ireq_family = AF_INET6;
        ireq->ir_mark = inet_request_mark(sk, skb);

        if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
        ireq->ir_iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
         */
        dreq->dreq_isr     = dcb->dccpd_seq;
        dreq->dreq_gsr     = dreq->dreq_isr;
        dreq->dreq_iss     = dccp_v6_init_sequence(skb);
        dreq->dreq_gss     = dreq->dreq_iss;
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req))
                goto drop_and_free;

        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        reqsk_put(req);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        __DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
        return -1;
}

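/*
 * Create the child (accepted) socket once the handshake has completed,
 * handling both native IPv6 and IPv4-mapped connections.
 */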
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst,
                                              struct request_sock *req_unhash,
                                              bool *own_req)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct inet_sock *newinet;
        struct dccp6_sock *newdp6;
        struct sock *newsk;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
                                                  req_unhash, own_req);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                newnp->saddr = newsk->sk_v6_rcv_saddr;

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->mcast_oif   = inet_iif(skb);
                newnp->mcast_hops  = ip_hdr(skb)->ttl;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* It is tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                struct flowi6 fl6;

                dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
                if (!dst)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        ip6_dst_store(newsk, dst, NULL, NULL);
        newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                      NETIF_F_TSO);
        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        newsk->sk_v6_daddr      = ireq->ir_v6_rmt_addr;
        newnp->saddr            = ireq->ir_v6_loc_addr;
        newsk->sk_v6_rcv_saddr  = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if  = ireq->ir_iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->inet_opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        newnp->ipv6_mc_list = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;
        newnp->pktoptions = NULL;
        newnp->opt        = NULL;
        newnp->mcast_oif  = inet6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

        /*
         * Clone native IPv6 options from listening socket (if any)
         *
         * Yes, keeping reference count would be much more clever, but we make
         * one more thing here: reattach optmem to newsk.
         */
        opt = ireq->ipv6_opt;
        if (!opt)
                opt = rcu_dereference(np->opt);
        if (opt) {
                opt = ipv6_dup_options(newsk, opt);
                RCU_INIT_POINTER(newnp->opt, opt);
        }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (opt)
                inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
                                                    opt->opt_flen;

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

        if (__inet_inherit_port(sk, newsk) < 0) {
                inet_csk_prepare_forced_close(newsk);
                dccp_done(newsk);
                goto out;
        }
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
        /* Clone pktoptions received with SYN, if we own the req */
        if (*own_req && ireq->pktopts) {
                newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
                consume_skb(ireq->pktopts);
                ireq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }

        return newsk;

out_overflow:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv is currently
         * called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
                if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
                        goto reset;
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        /*
         *  Step 3: Process LISTEN state
         *     If S.state == LISTEN,
         *        If P.type == Request or P contains a valid Init Cookie option,
         *           (* Must scan the packet's options to check for Init
         *              Cookies.  Only Init Cookies are processed here,
         *              however; other options are processed in Step 8.  This
         *              scan need only be performed if the endpoint uses Init
         *              Cookies *)
         *           (* Generate a new socket and switch to that socket *)
         *           Set S := new socket for this port pair
         *           S.state = RESPOND
         *           Choose S.ISS (initial seqno) or set from Init Cookies
         *           Initialize S.GAR := S.ISS
         *           Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
         *           Continue with S.state == RESPOND
         *           (* A Response packet will be generated in Step 11 *)
         *        Otherwise,
         *           Generate Reset(No Connection) unless P.type == Reset
         *           Drop packet and return
         *
         * NOTE: the check for the packet types is done in
         *       dccp_rcv_state_process
         */

        if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
                goto reset;
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        dccp_v6_ctl_send_reset(sk, skb);
discard:
        if (opt_skb != NULL)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;

/* Handling IPV6_PKTOPTIONS skb the similar
 * way it's done for net/ipv6/tcp_ipv6.c
 */
ipv6_pktoptions:
        if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = inet6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
                        np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
                if (np->repflow)
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb,
                                      &DCCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
                        memmove(IP6CB(opt_skb),
                                &DCCP_SKB_CB(opt_skb)->header.h6,
                                sizeof(struct inet6_skb_parm));
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        kfree_skb(opt_skb);
        return 0;
}

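/*
 * Main receive entry point, registered below as the inet6_protocol handler
 * for IPPROTO_DCCP: checks header and checksum, looks up the owning socket
 * and hands the packet on to the connection state machine.
 */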
static int dccp_v6_rcv(struct sk_buff *skb)
{
        const struct dccp_hdr *dh;
        bool refcounted;
        struct sock *sk;
        int min_cov;

        /* Step 1: Check header basics */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        /* Step 1: If header checksum is incorrect, drop packet and return. */
        if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
                                     &ipv6_hdr(skb)->daddr)) {
                DCCP_WARN("dropped packet with invalid checksum\n");
                goto discard_it;
        }

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

lookup:
        sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
                                dh->dccph_sport, dh->dccph_dport,
                                inet6_iif(skb), 0, &refcounted);
        if (!sk) {
                dccp_pr_debug("failed to look up flow ID in table and "
                              "get corresponding socket\n");
                goto no_dccp_socket;
        }

        /*
         * Step 2:
         *      ... or S.state == TIMEWAIT,
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT) {
                dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
                inet_twsk_put(inet_twsk(sk));
                goto no_dccp_socket;
        }

        if (sk->sk_state == DCCP_NEW_SYN_RECV) {
                struct request_sock *req = inet_reqsk(sk);
                struct sock *nsk;

                sk = req->rsk_listener;
                if (unlikely(sk->sk_state != DCCP_LISTEN)) {
                        inet_csk_reqsk_queue_drop_and_put(sk, req);
                        goto lookup;
                }
                sock_hold(sk);
                refcounted = true;
                nsk = dccp_check_req(sk, skb, req);
                if (!nsk) {
                        reqsk_put(req);
                        goto discard_and_relse;
                }
                if (nsk == sk) {
                        reqsk_put(req);
                } else if (dccp_child_process(sk, nsk, skb)) {
                        dccp_v6_ctl_send_reset(sk, skb);
                        goto discard_and_relse;
                } else {
                        sock_put(sk);
                        return 0;
                }
        }
        /*
         * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
         *      o if MinCsCov = 0, only packets with CsCov = 0 are accepted
         *      o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
         */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
                dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
                              dh->dccph_cscov, min_cov);
                /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
                goto discard_and_relse;
        }

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
                                refcounted) ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *      If no socket ...
         *              Generate Reset(No Connection) unless P.type == Reset
         *              Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(sk, skb);
        }

discard_it:
        kfree_skb(skb);
        return 0;

discard_and_relse:
        if (refcounted)
                sock_put(sk);
        goto discard_it;
}

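/*
 * Active open (connect()): validate the destination, route the flow,
 * choose the source address, bind a local port and send the DCCP-Request.
 */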
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl6, 0, sizeof(fl6));

        if (np->sndflow) {
                fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl6.flowlabel);
                if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (IS_ERR(flowlabel))
                                return -EINVAL;
                        fl6_sock_release(flowlabel);
                }
        }
        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                }
                np->saddr = sk->sk_v6_rcv_saddr;

                return err;
        }

        if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                saddr = &sk->sk_v6_rcv_saddr;

        fl6.flowi6_proto = IPPROTO_DCCP;
        fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);

        dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl6.saddr;
                sk->sk_v6_rcv_saddr = *saddr;
        }

        /* set the source address */
        np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;

        ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (opt)
                icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

        inet->inet_dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;

        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
                                                      sk->sk_v6_daddr.s6_addr32,
                                                      inet->inet_sport,
                                                      inet->inet_dport);
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = dccp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *      DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = dccp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = dccp_v6_conn_request,
        .syn_recv_sock     = dccp_v6_request_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        static __u8 dccp_v6_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v6_ctl_sock_initialized))
                        dccp_v6_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
        }

        return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
        .name              = "dccp_v6",
        .owner             = THIS_MODULE,
        .close             = dccp_close,
        .connect           = dccp_v6_connect,
        .disconnect        = dccp_disconnect,
        .ioctl             = dccp_ioctl,
        .init              = dccp_v6_init_sock,
        .setsockopt        = dccp_setsockopt,
        .getsockopt        = dccp_getsockopt,
        .sendmsg           = dccp_sendmsg,
        .recvmsg           = dccp_recvmsg,
        .backlog_rcv       = dccp_v6_do_rcv,
        .hash              = inet6_hash,
        .unhash            = inet_unhash,
        .accept            = inet_csk_accept,
        .get_port          = inet_csk_get_port,
        .shutdown          = dccp_shutdown,
        .destroy           = dccp_v6_destroy_sock,
        .orphan_count      = &dccp_orphan_count,
        .max_header        = MAX_DCCP_HEADER,
        .obj_size          = sizeof(struct dccp6_sock),
        .slab_flags        = SLAB_TYPESAFE_BY_RCU,
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
        .h.hashinfo        = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_dccp_setsockopt,
        .compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
        .handler        = dccp_v6_rcv,
        .err_handler    = dccp_v6_err,
        .flags          = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = inet6_bind,
        .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
        .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .gettstamp         = sock_gettstamp,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
        .type           = SOCK_DCCP,
        .protocol       = IPPROTO_DCCP,
        .prot           = &dccp_v6_prot,
        .ops            = &inet6_dccp_ops,
        .flags          = INET_PROTOSW_ICSK,
};

static int __net_init dccp_v6_init_net(struct net *net)
{
        if (dccp_hashinfo.bhash == NULL)
                return -ESOCKTNOSUPPORT;

        return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
                                    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
        inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&dccp_hashinfo, AF_INET6);
}

static struct pernet_operations dccp_v6_ops = {
        .init       = dccp_v6_init_net,
        .exit       = dccp_v6_exit_net,
        .exit_batch = dccp_v6_exit_batch,
};

static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err)
                goto out;

        inet6_register_protosw(&dccp_v6_protosw);

        err = register_pernet_subsys(&dccp_v6_ops);
        if (err)
                goto out_destroy_ctl_sock;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err)
                goto out_unregister_proto;

out:
        return err;
out_unregister_proto:
        unregister_pernet_subsys(&dccp_v6_ops);
out_destroy_ctl_sock:
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
        goto out;
}

static void __exit dccp_v6_exit(void)
{
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        unregister_pernet_subsys(&dccp_v6_ops);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");