/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
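
/*
 * ip6_fragment() is defined further down but is needed by the output path
 * above it, hence the forward declaration.  __ip6_local_out() fixes up the
 * payload length and runs the NF_INET_LOCAL_OUT hook; ip6_local_out() then
 * hands the packet to dst_output() when the hook verdict allows it.
 */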
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));

	netif_rx_ni(newskb);
	return 0;
}
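
/*
 * Final output step: resolve the neighbour for the route and hand the
 * packet to the link layer.  For multicast destinations this also loops
 * a clone back to the local stack when a local listener (or the multicast
 * router socket) should see the packet.
 */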
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);
	}

	rcu_read_lock();
	neigh = dst_get_neighbour_noref(dst);
	if (neigh) {
		int res = neigh_output(neigh, skb);

		rcu_read_unlock();
		return res;
	}
	rcu_read_unlock();

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
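
/*
 * Fragment on the way out if the packet exceeds the path MTU (and is not
 * GSO) or if the route demands fragmentation; otherwise transmit directly.
 */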
static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}
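
/*
 * Entry point used as dst->output for IPv6 routes: drop everything if
 * IPv6 is administratively disabled on the device, then run the
 * POST_ROUTING hook (skipped for rerouted packets).
 */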
int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	for us is performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       const struct in6_addr *saddr, const struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;

	return 0;
}
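
/*
 * Deliver clones of a packet carrying a Router Alert option to every raw
 * socket registered for this alert value.  Returns 1 if the last matching
 * socket consumed the original skb, 0 if the caller still owns it.
 */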
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
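
/*
 * Decide what to do with a packet whose destination is an address we proxy
 * (proxy NDP): 1 = hand it to local input (unicast ND messages), 0 = forward
 * it, -1 = discard (link-local destination).
 */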
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
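
/*
 * The forwarding path proper: validate the packet (hop limit, source
 * address class, LRO, xfrm policy), emit redirects where appropriate,
 * check the path MTU, then decrement hop_limit and run the FORWARD hook.
 */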
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	struct neighbour *n;
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake, RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	n = dst_get_neighbour_noref(dst);
	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
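
/*
 * Copy per-packet metadata (type, priority, dst, mark, scheduling and
 * netfilter state) from the original skb to a freshly built fragment.
 */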
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}
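
/*
 * Find the offset at which a Fragment header must be inserted: skip past
 * the per-fragment extension headers (hop-by-hop, routing, and a destination
 * options header preceding a routing header).  *nexthdr is left pointing at
 * the nexthdr byte the caller will rewrite to NEXTHDR_FRAGMENT.
 */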
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
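
/*
 * Choose a fragment identification value: per-destination via the inet
 * peer cache when one is available, otherwise from a global counter.
 */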
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
	static atomic_t ipv6_fragmentation_id;
	int old, new;

	if (rt && !(rt->dst.flags & DST_NOPEER)) {
		struct inet_peer *peer;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		if (peer) {
			fhdr->identification = htonl(inet_getid(peer, 0));
			return;
		}
	}
	do {
		old = atomic_read(&ipv6_fragmentation_id);
		new = old + 1;
		if (!new)
			new = 1;
	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
	fhdr->identification = htonl(new);
}
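
/*
 * Two strategies: a fast path that rewrites an existing frag_list in place
 * when every fragment already has the right geometry, and a slow path that
 * allocates new skbs and copies the payload block by block.
 */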
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->dst);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
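
/*
 * Returns nonzero when a cached route can no longer be validated against
 * the flow address: neither the (host) route key nor the socket's last
 * used address matches.
 */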
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
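
/*
 * Common tail of the dst lookup helpers: do the route lookup, pick a
 * source address if the flow left it unspecified, and (with optimistic
 * DAD) fall back to the default router while our address is tentative.
 */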
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rcu_read_lock();
	n = dst_get_neighbour_noref(*dst);
	if (n && !(n->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		rcu_read_unlock();
		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	} else {
		rcu_read_unlock();
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst,
				      bool can_sleep)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool can_sleep)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
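
/*
 * UFO path of ip6_append_data(): build one oversized skb and let the
 * device (or the GSO layer) segment it, instead of fragmenting here.
 */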
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr, rt);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
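
/*
 * Duplicate extension header blobs for the cork.  (hdrlen + 1) * 8 is the
 * full header length in octets.
 */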
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
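
/*
 * Append data to an outgoing, possibly corked, socket buffer queue.
 * Datagram protocols typically pair it with ip6_push_pending_frames(),
 * roughly like this (a sketch, cf. udpv6_sendmsg()):
 *
 *	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
 *			      sizeof(struct udphdr), hlimit, tclass,
 *			      opt, &fl6, rt, msg->msg_flags, dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!corkreq)
 *		err = ip6_push_pending_frames(sk);
 */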
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags, rt);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != skb_frag_page(frag)) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					skb_frag_ref(skb, i);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    skb_frag_address(frag) + skb_frag_size(frag),
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			skb_frag_size_add(frag, copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
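
/*
 * Drop every piece of state the cork holds: the duplicated extension
 * headers, the cached dst, and the saved flow.
 */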
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
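
/*
 * Merge the queued skbs into one packet (tail skbs become the frag_list
 * of the first), prepend extension headers and the IPv6 header, and send
 * the result through ip6_local_out().
 */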
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = fl6->flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
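
/*
 * Abort a corked transmission: free everything queued on the socket and
 * release the cork.
 */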
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}