/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif

#include <linux/uaccess.h>
/*
 *	Parsing tlv encoded headers.
 *
 *	Parsing function "func" returns true if parsing succeeded
 *	and false if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	bool	(*func)(struct sk_buff *skb, int offset);
};

/*********************
  Generic functions
 *********************/
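/*
 * Note (added for clarity, not in the original source): each TLV handler
 * table below is an array of tlvtype_proc entries terminated by a sentinel
 * entry whose .type is negative.  ip6_parse_tlv() scans a table linearly and,
 * when the scan reaches the sentinel without a match, hands the option to
 * ip6_tlvopt_unknown() instead.
 */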
/* An unknown option is detected, decide what to do */
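/*
 * Clarifying note (not in the original source): per RFC 2460, Section 4.2,
 * the two high-order bits of the option type tell a receiver what to do with
 * an unrecognized option: 00 skip it, 01 discard the packet, 10 discard and
 * send an ICMPv6 Parameter Problem, 11 discard and send the ICMPv6 message
 * only if the destination was not a multicast address.  The switch below
 * implements that table.
 */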
static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return true;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, it is a redundant check. icmp_send
		 * will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return false;
	}

	kfree_skb(skb);
	return false;
}
/* Parse tlv encoded option header (hop-by-hop or destination) */

static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
	const struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	int padlen = 0;

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;
		int i;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			padlen++;
			if (padlen > 7)
				goto bad;
			break;

		case IPV6_TLV_PADN:
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends receiving hosts to
			 * actively check PadN payload to contain
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					 * checks will be performed in the
					 * func().
					 */
					if (curr->func(skb, off) == false)
						return false;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return false;
			}
			padlen = 0;
			break;
		}
		off += optlen;
		len -= optlen;
	}

	if (len == 0)
		return true;
bad:
	kfree_skb(skb);
	return false;
}
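/*
 * Note (added for clarity, not in the original source): on every failure path
 * ip6_parse_tlv() -- or the per-option handler it invoked -- disposes of the
 * skb itself and returns false.  Callers such as ipv6_destopt_rcv() and
 * ipv6_parse_hopopts() therefore only bump their error counters and return -1
 * on a parse failure; they must not free the packet a second time.
 */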
/*****************************
  Destination options header.
 *****************************/

#if IS_ENABLED(CONFIG_IPV6_MIP6)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		net_dbg_ratelimited("hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		net_dbg_ratelimited("hao invalid option length = %d\n",
				    hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		net_dbg_ratelimited("hao is not a unicast addr: %pI6\n",
				    &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update all variables used below to point into the copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	tmp_addr = ipv6h->saddr;
	ipv6h->saddr = hao->addr;
	hao->addr = tmp_addr;

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	return true;

 discard:
	kfree_skb(skb);
	return false;
}
#endif
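/*
 * Background note (added for clarity, not in the original source): the Home
 * Address destination option belongs to Mobile IPv6 (RFC 6275).  The handler
 * above swaps the home address carried in the option with the packet's source
 * address, so upper layers see the mobile node's home address as the source
 * while the option retains the care-of address.
 */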
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{ -1, NULL }		/* sentinel: end of list */
};
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	return -1;
}
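/*
 * Note (added for clarity, not in the original source): extension header
 * receive handlers follow the inet6_protocol convention of the IPv6 input
 * path: a positive return asks the caller to resubmit the packet for the next
 * header (using the updated nhoff/transport_header), while a negative return
 * means the handler has already dropped the skb.
 */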
/********************************
  Routing header.
 ********************************/

/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by own
			 */
			if (!addr) {
				IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
						 IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 * This is the routing header forwarding algorithm from
	 * RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	 * Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	daddr = *addr;
	*addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = daddr;

	skb_dst_drop(skb);
	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}
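/*
 * Worked example (added for clarity, not in the original source): for a type 2
 * routing header, hdrlen is twice the number of addresses, so n = hdrlen >> 1
 * is the address count.  After segments_left is decremented, i = n -
 * segments_left is the 1-based index of the next address to visit, and
 * rthdr->addr + (i - 1) is the slot that gets swapped with the packet's
 * destination address before the packet is re-routed via ip6_route_input().
 */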
static const struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};
int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	if (ret)
		goto out_rthdr;

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
	if (ret)
		goto out_destopt;

out:
	return ret;
out_destopt:
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	goto out;
}
void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
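/*
 * Note (added for clarity, not in the original source): the handlers are keyed
 * by IPv6 next-header values -- IPPROTO_ROUTING (43), IPPROTO_DSTOPTS (60) and
 * IPPROTO_NONE (59) -- so the IPv6 input path dispatches routing headers,
 * destination options and "no next header" directly to this file.
 */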
/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}
/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
		return true;
	}
	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
			    nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}
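/*
 * Note (added for clarity, not in the original source): RFC 2711 defines the
 * Router Alert option with a fixed 2-octet value; the value is stashed in
 * IP6CB(skb)->ra and IP6SKB_ROUTERALERT is set so that protocols such as MLD
 * can later check whether the packet carried a router alert.
 */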
/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
				    nh[optoff + 1]);
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff + 2);
		return false;
	}
	if (ipv6_hdr(skb)->payload_len) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
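/*
 * Note (added for clarity, not in the original source): these checks mirror
 * RFC 2675: the Jumbo Payload option must be 4 octets long and sit on a 4n+2
 * boundary, the base header's payload_len must be zero, and the jumbo length
 * must exceed IPV6_MAXPLEN (65535).  Length/alignment violations are counted
 * and dropped; the other violations additionally trigger an ICMPv6 Parameter
 * Problem pointing at the offending field.
 */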
static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, NULL }		/* sentinel: end of list */
};
int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}
/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push"	functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller reserved enough room
 *	for headers.
 */
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
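/*
 * Note (added for clarity, not in the original source): because skb_push()
 * prepends, the non-fragmentable part is built in reverse.  The routing header
 * is pushed first, then the RFC 3542 dst0 options, then the hop-by-hop options,
 * so the final on-wire order is hop-by-hop, destination options, routing
 * header, with *proto threaded through as each header's nexthdr value.
 */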
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char *)opt2 - (char *)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
		atomic_set(&opt2->refcnt, 1);
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
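/*
 * Note (added for clarity, not in the original source): the option pointers
 * inside ipv6_txoptions point into the same allocation as the struct itself,
 * so after the flat memcpy() each non-NULL pointer is rebased by the byte
 * difference ("dif") between the new and old buffers.
 */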
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);
	atomic_set(&opt2->refcnt, 1);
	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
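/*
 * Note (added for clarity, not in the original source): ipv6_renew_options()
 * rebuilds the ipv6_txoptions blob in a single allocation.  Headers whose type
 * differs from newtype are copied ("inherited") from the old set, while the
 * header matching newtype is taken from the user-supplied newopt buffer (or
 * simply omitted when newopt is NULL), and opt_nflen/opt_flen are recomputed
 * from the result.
 */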
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt, otherwise returns orig
 * and initial value of fl6->daddr set in orig
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
				const struct ipv6_txoptions *opt,
				struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	*orig = fl6->daddr;
	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
	return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);
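/*
 * Usage sketch (added for clarity, not in the original source; variable names
 * are illustrative): a caller preparing a flow typically does
 *
 *	struct in6_addr final, *final_p;
 *
 *	final_p = fl6_update_dst(&fl6, opt, &final);
 *
 * After this, fl6.daddr points at the first source-route hop for the route
 * lookup, while "final" (returned as final_p when a routing header is present)
 * still holds the packet's real final destination.
 */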