/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_is_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
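/* For reference (not part of the original file): neigh_output() above either
 * uses the cached hardware header for a NUD_CONNECTED neighbour or falls back
 * to the neighbour's slow-path output function. A simplified sketch of that
 * helper from include/net/neighbour.h:
 *
 *	if ((n->nud_state & NUD_CONNECTED) && n->hh.hh_len)
 *		return neigh_hh_output(&n->hh, skb);
 *	return n->output(n, skb);
 */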
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
	if (!np->autoflowlabel_set)
		return ip6_default_np_autolabel(net);
	else
		return np->autoflowlabel;
}
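/* Illustrative only: the per-socket flag consulted above is set from user
 * space with the IPV6_AUTOFLOWLABEL socket option; sockets that never set it
 * fall back to the net.ipv6.auto_flowlabels sysctl policy. E.g.:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL, &on, sizeof(on));
 */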
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: the socket lock is not held for SYNACK packets, but the skb might
 * be modified by calls to skb_set_owner_w() and ipv6_local_error(),
 * which use proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int head_room;
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
	if (opt)
		head_room += opt->opt_nflen + opt->opt_flen;

	if (unlikely(skb_headroom(skb) < head_room)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
		if (!skb2) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -ENOBUFS;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (opt) {
		seg_len += opt->opt_nflen + opt->opt_flen;

		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);

		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
					     &fl6->saddr);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
				ip6_autoflowlabel(net, np), fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_OUT, skb->len);

		/* if egress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_out((struct sock *)sk, skb);
		if (unlikely(!skb))
			return 0;

		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
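/* Usage sketch (illustrative; the names fl6/opt/np are the caller's own):
 * a transport protocol hands ip6_xmit() an skb whose transport header is
 * already built, together with a populated flow, roughly:
 *
 *	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
 *
 * See tcp_v6_send_synack() for a real call site.
 */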
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);

	return dst_output(net, sk, skb);
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
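/* Worked example (illustrative): for a GSO skb with gso_size 1400 and
 * roughly 60 bytes of IPv6+TCP headers, each segment's network-layer length
 * is about 1460, so skb_gso_validate_network_len() passes for mtu 1500 and
 * the skb is not reported as too big; against mtu 1280 it would be.
 */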
int ip6_forward(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT do any processing on RA packets; we push them
	 *	to user level AS IS, without any warranty that the
	 *	application will be able to interpret them. The reason is
	 *	that we cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP we cannot do anything.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	 * send redirects to source routed frames.
	 * We don't send redirects to frames decapsulated from IPsec.
	 */
	if (IP6CB(skb)->iif == dst->dev->ifindex &&
	    opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto fail;
	hlen = err;
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = __skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = __skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb_mark_not_on_list(skb);
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		u8 *fragnexthdr_offset;

		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		fragnexthdr_offset = skb_network_header(frag);
		fragnexthdr_offset += prevhdr - skb_network_header(skb);
		*fragnexthdr_offset = NEXTHDR_FRAGMENT;

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
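/* Worked example (illustrative): with a 1500-byte MTU and only the basic
 * 40-byte IPv6 header (hlen == 40), the per-fragment data space computed
 * above is 1500 - 40 - 8 = 1452, rounded down to a multiple of eight: 1448.
 * A 3000-byte payload therefore leaves in three fragments carrying 1448,
 * 1448 and 104 bytes, i.e. 1496, 1496 and 152 bytes on the wire.
 */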
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the non-connected case
	 * is not very simple. Take into account that we do not
	 * support routing by source, TOS, and MSG_DONTROUTE --ANK (980726)
	 *
	 * 1. ip6_rt_check(): if the route was a host route,
	 *    check that the cached destination is current.
	 *    If it is a network route, we still may
	 *    check its validity using the saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save the whole address now
	 *    (because the main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so the last trick works only on connected sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct fib6_info *from;
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;

		rcu_read_lock();
		from = rt ? rcu_dereference(rt->from) : NULL;
		err = ip6_route_get_saddr(net, from, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		rcu_read_unlock();

		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif
	if (ipv6_addr_v4mapped(&fl6->saddr) &&
	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
		err = -EAFNOSUPPORT;
		goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
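/* Usage sketch (illustrative): callers typically unwrap the pointer-encoded
 * error, as tcp_v6_connect() does, roughly:
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 *	if (IS_ERR(dst)) {
 *		err = PTR_ERR(dst);
 *		goto failure;
 *	}
 */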
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@connected: whether @sk is connected or not
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	In addition, for a connected socket, cache the dst in the socket
 *	if the current cache is not valid.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool connected)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (dst)
		return dst;

	dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
	if (connected && !IS_ERR(dst))
		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
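/* Worked example (illustrative): with *mtu == 1500 and a 40-byte
 * fragheaderlen, *maxfraglen becomes ((1500 - 40) & ~7) + 40 - 8 == 1488,
 * so each non-final fragment carries 1448 data bytes, a multiple of 8 as
 * required by the fragment header's offset encoding.
 */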
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = sizeof(*opt);
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < IPV6_MIN_MTU)
		return -EINVAL;
	cork->base.fragsize = mtu;
	cork->base.gso_size = ipc6->gso_size;
	cork->base.tx_flags = 0;
	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);

	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	cork->base.transmit_time = ipc6->sockc.transmit_time;

	return 0;
}
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;
	unsigned int wmem_alloc_delta = 0;
	bool paged;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	paged = !!cork->gso_size;
	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
	orig_mtu = mtu;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
	 * the first fragment
	 */
	if (headersize + transhdrlen > mtu)
		goto emsgsize;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				  sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length <= mtu - headersize &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			unsigned int pagedlen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else if (!paged)
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy < 0) {
				err = -EINVAL;
				goto error;
			}
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen + hh_len,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			if (copy > 0 &&
			    getfrag(from, data + transhdrlen, offset,
				    copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags & MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
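/* Usage sketch (illustrative; error handling trimmed): datagram senders pair
 * this with the push/flush helpers further below, roughly:
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, 0, &ipc6, &fl6,
 *			      rt, msg->msg_flags);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 *
 * See rawv6_sendmsg() for a real caller of this pattern.
 */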
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					ip6_autoflowlabel(net, np), fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb->tstamp = cork->base.transmit_time;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}

void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     struct inet_cork_full *cork)
{
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->base.flags = 0;
	cork->base.addr = 0;
	cork->base.opt = NULL;
	cork->base.dst = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
	if (err) {
		ip6_cork_release(cork, &v6_cork);
		return ERR_PTR(err);
	}
	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
}
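/* Usage sketch (illustrative): unlike the corked path above, this builds the
 * whole datagram in one shot on a private queue; the UDPv6 fast path does
 * roughly:
 *
 *	skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr),
 *			   &ipc6, &fl6, rt, msg->msg_flags, &cork);
 *	if (!IS_ERR_OR_NULL(skb))
 *		err = udp_v6_send_skb(skb, &fl6, &cork.base);
 */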