/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
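
/*
 *	Pick the identification field for a new fragment header. A single
 *	global counter is used under a spinlock; zero is skipped, so a
 *	frag id of zero can be used to mean "not yet selected".
 */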
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
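
/*
 *	Final output step: use the destination's cached hardware header
 *	if one exists, otherwise let the neighbour entry resolve and
 *	transmit the frame; with neither, we can only drop.
 */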
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
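
/*
 *	Deliver locally looped multicast copies where needed, then pass
 *	the packet through the POST_ROUTING hook to ip6_output_finish().
 */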
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}
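
/*
 *	dst_output() entry point: fragment if the packet is larger than
 *	the path MTU and cannot be offloaded (no ufo_size), or if the
 *	route requires fragmentation on all packets.
 */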
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;
	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (skb->sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = skb->dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication, but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
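
/*
 *	Give a copy of a Router Alert packet to every raw socket that
 *	registered for this RA selector on a matching device. Returns 1
 *	if at least one socket consumed the packet.
 */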
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
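
/*
 *	Forwarding path: check that forwarding is enabled, let RA packets
 *	go to interested raw sockets as-is, enforce hop limit, xfrm
 *	policy, source address sanity and path MTU, send redirects when
 *	the packet leaves on the interface it arrived on, and finally
 *	run the FORWARD netfilter hook.
 */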
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that applications will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
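
/*
 *	Copy per-packet metadata (packet type, priority, protocol, dst
 *	and netfilter/scheduler state) from the original skb to a newly
 *	created fragment.
 */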
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
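
/*
 *	Walk the chain of extension headers that must stay in the
 *	unfragmentable part (hop-by-hop, routing, destination options)
 *	and return the offset at which the fragment header has to be
 *	inserted; *nexthdr ends up pointing at the nexthdr byte that the
 *	caller overwrites with NEXTHDR_FRAGMENT.
 */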
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;

	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
			break;
		default:
			return offset;
		}
	}

	return offset;
}
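
/*
 *	Fragment an oversized datagram. The fast path reuses the skbs
 *	already chained on frag_list when their geometry fits; otherwise
 *	the slow path below allocates one new skb per fragment and
 *	copies the payload out block by block.
 */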
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
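
/*
 *	Resolve the dst entry for a flow: reuse the socket's cached route
 *	when it is still valid for this destination and oif, otherwise
 *	perform a fresh lookup, and pick a source address if the flow
 *	does not have one yet.
 */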
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	int err = 0;

	*dst = NULL;
	if (sk) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		*dst = sk_dst_check(sk, np->dst_cookie);
		if (*dst) {
			struct rt6_info *rt = (struct rt6_info *)*dst;

			/* Yes, checking route validity in the not connected
			 * case is not very simple. Take into account that
			 * we do not support routing by source, TOS,
			 * and MSG_DONTROUTE		--ANK (980726)
			 *
			 * 1. If route was host route, check that
			 *    cached destination is current.
			 *    If it is network route, we still may
			 *    check its validity using saved pointer
			 *    to the last used address: daddr_cache.
			 *    We do not want to save whole address now,
			 *    (because main consumer of this service
			 *    is tcp, which does not have this problem),
			 *    so that the last trick works only on connected
			 *    sockets.
			 * 2. oif also should be the same.
			 */
			if (((rt->rt6i_dst.plen != 128 ||
			      !ipv6_addr_equal(&fl->fl6_dst,
					       &rt->rt6i_dst.addr))
			     && (np->daddr_cache == NULL ||
				 !ipv6_addr_equal(&fl->fl6_dst,
						  np->daddr_cache)))
			    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
				dst_release(*dst);
				*dst = NULL;
			}
		}
	}

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

EXPORT_SYMBOL_GPL(ip6_dst_lookup);
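
/*
 *	UFO variant of ip6_append_data(): build one large skb and let the
 *	device split it into fragments. The per-fragment payload size and
 *	the fragment id are stored in the skb's shared info.
 */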
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
					    sizeof(struct frag_hdr);
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);
	return err;
}
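
/*
 *	Queue data for transmission by ip6_push_pending_frames(). The
 *	first call records the cork state (options, route, hop limit,
 *	traffic class, fragment size); this and later calls then fill
 *	MTU-sized skbs, using page frags when the device does SG, or a
 *	single UFO skb when the device can segment UDP itself.
 */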
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
			int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, int tclass, struct ipv6_txoptions *opt,
		    struct flowi *fl, struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);
			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}
		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;

error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
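
/*
 *	Collapse the write queue into one datagram (the tail skbs become
 *	the frag_list of the head skb), prepend extension headers and the
 *	IPv6 header, then send the result through the LOCAL_OUT hook.
 */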
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32 *)hdr = fl->fl6_flowlabel |
		      htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
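
/*
 *	Drop everything queued by ip6_append_data() and reset the cork
 *	state; used when the pending data cannot be sent.
 */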
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}