/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
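
/* Neighbour resolution and transmit step of the IPv6 output path: loop
 * multicast copies back to the local stack when required, then hand the
 * skb to the neighbour layer for the nexthop of the cached route.
 */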
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
		    ((mroute6_socket(net, skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(net, idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
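
/* Decide between direct transmission and fragmentation once the
 * post-routing hook has run.
 */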
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(net, sk, skb, ip6_finish_output2);
	else
		return ip6_finish_output2(net, sk, skb);
}
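
/* dst_output() entry point for locally generated IPv6 packets; runs the
 * NF_INET_POST_ROUTING hook unless the packet was rerouted by netfilter.
 */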
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 * Note: socket lock is not held for SYNACK packets, but might be modified
 * by calls to skb_set_owner_w() and ipv6_local_error(),
 * which are using proper atomic operations or spinlocks.
 */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now);
		 * MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (!skb2) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
			 * it is safe to call in our context (socket lock not held)
			 */
			skb_set_owner_w(skb, (struct sock *)sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel, fl6));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_OUT, skb->len);
		/* hooks should never assume socket lock is held.
		 * we promote our socket to non const
		 */
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
			       net, (struct sock *)sk, skb, NULL, dst->dev,
			       dst_output);
	}

	skb->dev = dst->dev;
	/* ipv6_local_error() does not require socket lock,
	 * we promote our socket to non const
	 */
	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);

	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);
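
/* Deliver a clone of a Router Alert packet to every raw socket registered
 * on the matching ip6_ra_chain entry; returns 1 if the packet was consumed
 * by at least one socket, 0 otherwise.
 */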
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
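
/* For proxied NDP: returns 1 if the packet is a neighbour discovery message
 * that must be passed to the local input path, -1 if it must be dropped,
 * and 0 if it should be forwarded normally.
 */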
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}
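
/* MTU to use when forwarding via this dst: a locked route metric wins,
 * otherwise fall back to the outgoing device's IPv6 MTU.
 */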
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
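
/* Main IPv6 forwarding path: policy and hop limit checks, proxy NDP,
 * redirect generation, MTU enforcement, then the NF_INET_FORWARD hook.
 */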
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (unlikely(skb->sk))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			__IP6_INC_STATS(net, ip6_dst_idev(dst),
					IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	 * send redirects to source routed frames.
	 * We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_INTOOBIGERRORS);
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		__IP6_INC_STATS(net, ip6_dst_idev(dst),
				IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}
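
/* Fragment an IPv6 packet and pass each fragment to @output.  A fast path
 * reuses an existing frag_list when its geometry already fits the MTU;
 * otherwise the slow path copies data into newly allocated fragments.
 */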
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto fail_toobig;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(net, sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		 * then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess.
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(net, sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
}
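
/* Validate a dst cached on the socket against the flow; returns the dst if
 * it is still usable, otherwise releases it and returns NULL.
 */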
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the not connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because the main consumer of this service
	 *    is TCP, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
	     (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
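
/* Common part of the dst lookup helpers: resolve the source address,
 * perform the route lookup and, with optimistic DAD, fall back to the
 * default router's dst when the nexthop neighbour is not yet valid.
 */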
static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;
	int flags = 0;

	if (ipv6_addr_any(&fl6->saddr) && fl6->flowi6_oif &&
	    (!*dst || !(*dst)->error)) {
		err = l3mdev_get_saddr6(net, sk, fl6);
		if (err)
			return err;
	}

	/* The correct way to handle this would be to do
	 * ip6_route_get_saddr, and then ip6_route_output; however,
	 * the route-specific preferred source forces the
	 * ip6_route_output call _before_ ip6_route_get_saddr.
	 *
	 * In source specific routing (no src=any default route),
	 * ip6_route_output will fail given src=any saddr, though, so
	 * that's why we try it again later.
	 */
	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
		struct rt6_info *rt;
		bool had_dst = *dst != NULL;

		if (!had_dst)
			*dst = ip6_route_output(net, sk, fl6);
		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;

		/* If we had an erroneous initial result, pretend it
		 * never existed and let the SA-enabled version take
		 * over.
		 */
		if (!had_dst && (*dst)->error) {
			dst_release(*dst);
			*dst = NULL;
		}

		if (fl6->flowi6_oif)
			flags |= RT6_LOOKUP_F_IFACE;
	}

	if (!*dst)
		*dst = ip6_route_output_flags(net, sk, fl6, flags);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
				      rt6_nexthop(rt, &fl6->daddr));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;

	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
		   struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);

	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);

	dst = ip6_sk_dst_check(sk, dst, fl6);
	if (!dst)
		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
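
/* UDP fragmentation offload path of ip6_append_data(): build one large
 * GSO skb and let the device segment it on transmit.
 */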
static inline int ip6_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int exthdrlen, int transhdrlen, int mtu,
			unsigned int flags, const struct flowi6 *fl6)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_set_network_header(skb, exthdrlen);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
							 &fl6->daddr,
							 &fl6->saddr);

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (!skb) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}
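
/* Record everything needed to build the packet later (options, flow, hop
 * limit, traffic class and path MTU) in the cork state while the socket is
 * corked.
 */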
static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
			  struct rt6_info *rt, struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	unsigned int mtu;
	struct ipv6_txoptions *opt = ipc6->opt;

	/*
	 * setup for corking
	 */
	if (opt) {
		if (WARN_ON(v6_cork->opt))
			return -EINVAL;

		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
		if (unlikely(!v6_cork->opt))
			return -ENOBUFS;

		v6_cork->opt->tot_len = opt->tot_len;
		v6_cork->opt->opt_flen = opt->opt_flen;
		v6_cork->opt->opt_nflen = opt->opt_nflen;

		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
						    sk->sk_allocation);
		if (opt->dst0opt && !v6_cork->opt->dst0opt)
			return -ENOBUFS;

		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
						    sk->sk_allocation);
		if (opt->dst1opt && !v6_cork->opt->dst1opt)
			return -ENOBUFS;

		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
						   sk->sk_allocation);
		if (opt->hopopt && !v6_cork->opt->hopopt)
			return -ENOBUFS;

		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
						    sk->sk_allocation);
		if (opt->srcrt && !v6_cork->opt->srcrt)
			return -ENOBUFS;

		/* need source address above miyazawa */
	}
	dst_hold(&rt->dst);
	cork->base.dst = &rt->dst;
	cork->fl.u.ip6 = *fl6;
	v6_cork->hop_limit = ipc6->hlimit;
	v6_cork->tclass = ipc6->tclass;
	if (rt->dst.flags & DST_XFRM_TUNNEL)
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
	else
		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	cork->base.fragsize = mtu;
	if (dst_allfrag(rt->dst.path))
		cork->base.flags |= IPCORK_ALLFRAG;
	cork->base.length = 0;

	return 0;
}
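
/* Core of ip6_append_data()/ip6_make_skb(): append user data to the given
 * queue, growing the tail skb or allocating new ones sized to the path MTU
 * (and using page frags when the device supports scatter-gather).
 */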
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, struct ipcm6_cookie *ipc6,
			     const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;
	unsigned int maxnonfragsize, headersize;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	headersize = sizeof(struct ipv6hdr) +
		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
		     (dst_allfrag(&rt->dst) ?
		      sizeof(struct frag_hdr) : 0) +
		     rt->rt6i_nfheader_len;

	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
	    (sk->sk_protocol == IPPROTO_UDP ||
	     sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
				  sizeof(struct ipv6hdr));
		goto emsgsize;
	}

	if (ip6_sk_ignore_df(sk))
		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
	else
		maxnonfragsize = mtu;

	if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
		ipv6_local_error(sk, EMSGSIZE, fl6,
				 mtu - headersize +
				 sizeof(struct ipv6hdr));
		return -EMSGSIZE;
	}

	/* CHECKSUM_PARTIAL only with no extension headers and when
	 * we are not going to fragment
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    headersize == sizeof(struct ipv6hdr) &&
	    length < mtu - headersize &&
	    !(flags & MSG_MORE) &&
	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		csummode = CHECKSUM_PARTIAL;

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
int ip6_append_data(struct sock *sk,
		    int getfrag(void *from, char *to, int offset, int len,
				int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
		    struct rt6_info *rt, unsigned int flags,
		    const struct sockcm_cookie *sockc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int exthdrlen;
	int err;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
				     ipc6, rt, fl6);
		if (err)
			return err;

		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		fl6 = &inet->cork.fl.u.ip6;
		transhdrlen = 0;
	}

	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
				 &np->cork, sk_page_frag(sk), getfrag,
				 from, length, transhdrlen, flags, ipc6, sockc);
}
EXPORT_SYMBOL_GPL(ip6_append_data);
static void ip6_cork_release(struct inet_cork_full *cork,
			     struct inet6_cork *v6_cork)
{
	if (v6_cork->opt) {
		kfree(v6_cork->opt->dst0opt);
		kfree(v6_cork->opt->dst1opt);
		kfree(v6_cork->opt->hopopt);
		kfree(v6_cork->opt->srcrt);
		kfree(v6_cork->opt);
		v6_cork->opt = NULL;
	}

	if (cork->base.dst) {
		dst_release(cork->base.dst);
		cork->base.dst = NULL;
		cork->base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&cork->fl, 0, sizeof(cork->fl));
}
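
/* Collapse the queued fragments into one skb, push extension headers and
 * the IPv6 header, and account the packet; the result is ready for
 * ip6_send_skb().
 */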
struct sk_buff *__ip6_make_skb(struct sock *sk,
			       struct sk_buff_head *queue,
			       struct inet_cork_full *cork,
			       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = v6_cork->opt;
	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
	struct flowi6 *fl6 = &cork->fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, v6_cork->tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel, fl6));
	hdr->hop_limit = v6_cork->hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	ip6_cork_release(cork, v6_cork);
out:
	return skb;
}
int ip6_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int err;

	err = ip6_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP6_INC_STATS(net, rt->rt6i_idev,
				      IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	skb = ip6_finish_skb(sk);
	if (!skb)
		return 0;

	return ip6_send_skb(skb);
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
static void __ip6_flush_pending_frames(struct sock *sk,
				       struct sk_buff_head *queue,
				       struct inet_cork_full *cork,
				       struct inet6_cork *v6_cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(cork, v6_cork);
}
void ip6_flush_pending_frames(struct sock *sk)
{
	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
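
/* Build a datagram on a private queue in a single call: cork, append the
 * data and collapse it into one skb, without touching the socket's write
 * queue or requiring the socket lock across calls.
 */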
struct sk_buff *ip6_make_skb(struct sock *sk,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			     struct rt6_info *rt, unsigned int flags,
			     const struct sockcm_cookie *sockc)
{
	struct inet_cork_full cork;
	struct inet6_cork v6_cork;
	struct sk_buff_head queue;
	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.base.flags = 0;
	cork.base.addr = 0;
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
	if (err)
		return ERR_PTR(err);

	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
				&current->task_frag, getfrag, from,
				length + exthdrlen, transhdrlen + exthdrlen,
				flags, ipc6, sockc);
	if (err) {
		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
		return ERR_PTR(err);
	}

	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
}