/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
		       dst_output);
}
int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 0))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
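
/*
 * Usage sketch (illustrative note, not part of the original file): a caller
 * that has already routed an skb (skb->dst set) and built the IPv6 header can
 * hand it to ip6_local_out(), which fixes up payload_len, traverses the
 * LOCAL_OUT netfilter hook and then invokes dst_output().  Hypothetically,
 * mirroring ip6_push_pending_frames() later in this file:
 *
 *	skb->dst = dst_clone(&rt->u.dst);
 *	err = ip6_local_out(skb);
 *	if (err > 0)
 *		err = np->recverr ? net_xmit_errno(err) : 0;
 */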
static int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = skb->dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
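
/*
 * Usage sketch (illustrative note, not part of the original file): a
 * connection-oriented transport typically routes once, caches the dst on the
 * socket and then calls ip6_xmit() for every segment.  Hypothetical caller,
 * error handling omitted:
 *
 *	struct flowi fl;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.proto = sk->sk_protocol;
 *	fl.oif = sk->sk_bound_dev_if;
 *	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
 *	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
 *
 *	skb->dst = dst_clone(dst);
 *	err = ip6_xmit(sk, skb, &fl, np->opt, 0);
 */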
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * the input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}
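	/*
	 * Illustrative note (not from the original file): opt->ra is the
	 * offset of the hop-by-hop Router Alert option within the packet, so
	 * ptr above points at the option itself: ptr[0] is the option type
	 * (5), ptr[1] the option data length (2), and ptr[2..3] the 16-bit
	 * alert value in network byte order, which (ptr[2]<<8) + ptr[3]
	 * reassembles for ip6_call_ra_chain().
	 */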
	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &init_net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
	    !skb->sp) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
			goto error;
		}
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb was not generated by a local socket.  (This last
	 * check should be redundant, but it's free.)
	 */
	if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
		skb->dev = skb->dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				truesizes += frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;

		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if (!((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(&init_net, &fl->fl6_src,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
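
/*
 * Usage sketch (illustrative note, not part of the original file): a datagram
 * sender fills a flowi, resolves the route, then attaches the result to its
 * packets.  Hypothetical caller:
 *
 *	struct flowi fl;
 *	struct dst_entry *dst;
 *	int err;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.proto = IPPROTO_UDP;
 *	fl.oif = sk->sk_bound_dev_if;
 *	ipv6_addr_copy(&fl.fl6_dst, daddr);
 *
 *	err = ip6_dst_lookup(sk, &dst, &fl);
 *	if (err)
 *		return err;
 *
 * On success, fl.fl6_src has been filled in by the lookup if it was
 * originally unspecified, and dst holds a reference that the caller must
 * release (or attach to an skb) when done.
 */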
/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing the complete
	 * UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);
<= sizeof(struct ipv6hdr
) + IPV6_MAXPLEN
) {
1153 if (inet
->cork
.length
+ length
> sizeof(struct ipv6hdr
) + IPV6_MAXPLEN
- fragheaderlen
) {
1154 ipv6_local_error(sk
, EMSGSIZE
, fl
, mtu
-exthdrlen
);
1160 * Let's try using as much space as possible.
1161 * Use MTU if total length of the message fits into the MTU.
1162 * Otherwise, we need to reserve fragment header and
1163 * fragment alignment (= 8-15 octects, in total).
1165 * Note that we may need to "move" the data from the tail of
1166 * of the buffer to the new fragment when we split
1169 * FIXME: It may be fragmented into multiple chunks
1170 * at once if non-fragmentable extension headers
1175 inet
->cork
.length
+= length
;
1176 if (((length
> mtu
) && (sk
->sk_protocol
== IPPROTO_UDP
)) &&
1177 (rt
->u
.dst
.dev
->features
& NETIF_F_UFO
)) {
1179 err
= ip6_ufo_append_data(sk
, getfrag
, from
, length
, hh_len
,
1180 fragheaderlen
, transhdrlen
, mtu
,
1187 if ((skb
= skb_peek_tail(&sk
->sk_write_queue
)) == NULL
)
1190 while (length
> 0) {
1191 /* Check if the remaining data fits into current packet. */
1192 copy
= (inet
->cork
.length
<= mtu
&& !(inet
->cork
.flags
& IPCORK_ALLFRAG
) ? mtu
: maxfraglen
) - skb
->len
;
1194 copy
= maxfraglen
- skb
->len
;
1198 unsigned int datalen
;
1199 unsigned int fraglen
;
1200 unsigned int fraggap
;
1201 unsigned int alloclen
;
1202 struct sk_buff
*skb_prev
;
1206 /* There's no room in the current skb */
1208 fraggap
= skb_prev
->len
- maxfraglen
;
1213 * If remaining data exceeds the mtu,
1214 * we know we need more fragment(s).
1216 datalen
= length
+ fraggap
;
1217 if (datalen
> (inet
->cork
.length
<= mtu
&& !(inet
->cork
.flags
& IPCORK_ALLFRAG
) ? mtu
: maxfraglen
) - fragheaderlen
)
1218 datalen
= maxfraglen
- fragheaderlen
;
1220 fraglen
= datalen
+ fragheaderlen
;
1221 if ((flags
& MSG_MORE
) &&
1222 !(rt
->u
.dst
.dev
->features
&NETIF_F_SG
))
1225 alloclen
= datalen
+ fragheaderlen
;
1228 * The last fragment gets additional space at tail.
1229 * Note: we overallocate on fragments with MSG_MODE
1230 * because we have no idea if we're the last one.
1232 if (datalen
== length
+ fraggap
)
1233 alloclen
+= rt
->u
.dst
.trailer_len
;
1236 * We just reserve space for fragment header.
1237 * Note: this may be overallocation if the message
1238 * (without MSG_MORE) fits into the MTU.
1240 alloclen
+= sizeof(struct frag_hdr
);
1243 skb
= sock_alloc_send_skb(sk
,
1245 (flags
& MSG_DONTWAIT
), &err
);
1248 if (atomic_read(&sk
->sk_wmem_alloc
) <=
1250 skb
= sock_wmalloc(sk
,
1251 alloclen
+ hh_len
, 1,
1253 if (unlikely(skb
== NULL
))
1259 * Fill in the control structures
1261 skb
->ip_summed
= csummode
;
1263 /* reserve for fragmentation */
1264 skb_reserve(skb
, hh_len
+sizeof(struct frag_hdr
));
1267 * Find where to start putting bytes
1269 data
= skb_put(skb
, fraglen
);
1270 skb_set_network_header(skb
, exthdrlen
);
1271 data
+= fragheaderlen
;
1272 skb
->transport_header
= (skb
->network_header
+
1275 skb
->csum
= skb_copy_and_csum_bits(
1276 skb_prev
, maxfraglen
,
1277 data
+ transhdrlen
, fraggap
, 0);
1278 skb_prev
->csum
= csum_sub(skb_prev
->csum
,
1281 pskb_trim_unique(skb_prev
, maxfraglen
);
1283 copy
= datalen
- transhdrlen
- fraggap
;
1288 } else if (copy
> 0 && getfrag(from
, data
+ transhdrlen
, offset
, copy
, fraggap
, skb
) < 0) {
1295 length
-= datalen
- fraggap
;
1298 csummode
= CHECKSUM_NONE
;
1301 * Put the packet on the pending queue
1303 __skb_queue_tail(&sk
->sk_write_queue
, skb
);
1310 if (!(rt
->u
.dst
.dev
->features
&NETIF_F_SG
)) {
1314 if (getfrag(from
, skb_put(skb
, copy
),
1315 offset
, copy
, off
, skb
) < 0) {
1316 __skb_trim(skb
, off
);
1321 int i
= skb_shinfo(skb
)->nr_frags
;
1322 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
-1];
1323 struct page
*page
= sk
->sk_sndmsg_page
;
1324 int off
= sk
->sk_sndmsg_off
;
1327 if (page
&& (left
= PAGE_SIZE
- off
) > 0) {
1330 if (page
!= frag
->page
) {
1331 if (i
== MAX_SKB_FRAGS
) {
1336 skb_fill_page_desc(skb
, i
, page
, sk
->sk_sndmsg_off
, 0);
1337 frag
= &skb_shinfo(skb
)->frags
[i
];
1339 } else if(i
< MAX_SKB_FRAGS
) {
1340 if (copy
> PAGE_SIZE
)
1342 page
= alloc_pages(sk
->sk_allocation
, 0);
1347 sk
->sk_sndmsg_page
= page
;
1348 sk
->sk_sndmsg_off
= 0;
1350 skb_fill_page_desc(skb
, i
, page
, 0, 0);
1351 frag
= &skb_shinfo(skb
)->frags
[i
];
1356 if (getfrag(from
, page_address(frag
->page
)+frag
->page_offset
+frag
->size
, offset
, copy
, skb
->len
, skb
) < 0) {
1360 sk
->sk_sndmsg_off
+= copy
;
1363 skb
->data_len
+= copy
;
1364 skb
->truesize
+= copy
;
1365 atomic_add(copy
, &sk
->sk_wmem_alloc
);
1372 inet
->cork
.length
-= length
;
1373 IP6_INC_STATS(rt
->rt6i_idev
, IPSTATS_MIB_OUTDISCARDS
);
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		ICMP6MSGOUT_INC_STATS_BH(idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	goto out;
}
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb->dst)
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}