/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);

		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)) ||
	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}

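/* Editor's note (not in the original source): ip6_finish_output() hands the
 * packet to ip6_fragment() in three cases -- it exceeds the path MTU and is
 * not GSO (so the hardware cannot segment it), the route demands a fragment
 * header on every packet (dst_allfrag(), set when the peer advertised an
 * MTU below IPV6_MIN_MTU), or conntrack defragmentation recorded an original
 * fragment size (frag_max_size) that the rebuilt packet now exceeds.
 * Everything else goes straight to ip6_finish_output2().
 */
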
int ip6_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

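/* Editor's note (not in the original source): NF_HOOK_COND() runs the
 * NF_INET_POST_ROUTING netfilter chain only when its condition argument is
 * true.  Packets flagged IP6SKB_REROUTED have already traversed POST_ROUTING
 * once before being rerouted, so the condition skips a second pass and calls
 * ip6_finish_output() directly.
 */
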
/*
 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
						     np->autoflowlabel));

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

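/* Editor's note (not in the original source): a minimal sketch of how a
 * connection-oriented transport drives ip6_xmit(); in-tree, TCP reaches it
 * through inet6_csk_xmit().  Locking and error handling are trimmed, and
 * the flow setup shown is illustrative only:
 *
 *	struct ipv6_pinfo *np = inet6_sk(sk);
 *	struct flowi6 fl6;
 *	struct dst_entry *dst;
 *
 *	memset(&fl6, 0, sizeof(fl6));
 *	fl6.flowi6_proto = IPPROTO_TCP;
 *	fl6.daddr = sk->sk_v6_daddr;
 *	fl6.saddr = np->saddr;
 *	dst = ip6_dst_lookup_flow(sk, &fl6, NULL);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 */
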
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* Unicast neighbor discovery messages destined to
			 * the proxied address are passed to the input
			 * function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	unsigned int mtu;
	struct inet6_dev *idev;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			return mtu;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

	return mtu;
}

static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	if (skb->ignore_df)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}

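/* Editor's note (not in the original source): a worked example of the checks
 * above.  A 1400-byte packet forwarded onto a 1280-byte-MTU route is "too
 * big" unless it is a locally reassembled datagram whose largest original
 * fragment fit the MTU (frag_max_size <= mtu, with ignore_df set by the
 * defragmenter), or a GSO packet whose individual segments all fit at the
 * network layer.
 */
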
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement hop limit
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
					 IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = ip6_dst_mtu_forward(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_copy_secmark(to, from);
}

static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
	static u32 ip6_idents_hashrnd __read_mostly;
	u32 hash, id;

	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));

	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);

	id = ip_idents_reserve(hash, 1);
	fhdr->identification = htonl(id);
}

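/* Editor's note (not in the original source): the fragment ID is derived
 * from a jhash of the route's destination and source addresses, keyed by a
 * once-per-boot random value, and ip_idents_reserve() then advances a
 * per-bucket counter.  IDs therefore stay unpredictable across peers while
 * remaining cheap to generate and non-repeating within a flow.
 */
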
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

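	/* Editor's note (not in the original source): a worked example of the
	 * MTU arithmetic above.  With a 1500-byte path MTU and a 40-byte
	 * unfragmentable part (hlen, here just the IPv6 header), mtu becomes
	 * 1500 - 40 - 8 = 1452 bytes of fragmentable payload per packet; the
	 * slow path below additionally rounds each non-final fragment down
	 * to a multiple of 8 (len &= ~7, giving 1448), as the 8-octet units
	 * of the fragment offset field require.
	 */
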
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (frag == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

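/* Editor's note (not in the original source): ip6_fragment() above has two
 * strategies.  The fast path reuses an existing frag_list whose chunks
 * already have fragment-sized, 8-byte-aligned geometry and enough headroom,
 * so each chunk only needs the headers pushed in front of it.  The slow
 * path allocates a fresh skb per fragment and copies the payload out of the
 * original, which handles any geometry at the cost of a copy.
 */
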
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt;

	if (!dst)
		goto out;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);
		return NULL;
	}

	rt = (struct rt6_info *)dst;
	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	err = (*dst)->error;
	if (err)
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			err = (*dst)->error;
			if (err)
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

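/* Editor's note (not in the original source): a minimal sketch of the
 * datagram-send pattern these lookup helpers serve; udp_v6_sendmsg()
 * follows roughly this shape.  Error handling is trimmed:
 *
 *	struct dst_entry *dst;
 *
 *	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
 *	if (IS_ERR(dst)) {
 *		err = PTR_ERR(dst);
 *		goto out;
 *	}
 *	err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
 *			      sizeof(struct udphdr), hlimit, tclass, opt,
 *			      &fl6, (struct rt6_info *)dst, msg->msg_flags,
 *			      dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else
 *		err = ip6_push_pending_frames(sk);
 */
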
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)
{
	struct sk_buff *skb;
	struct frag_hdr fhdr;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(&sk->sk_write_queue);
	if (skb == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->protocol = htons(ETH_P_IPV6);
		skb->csum = 0;

		__skb_queue_tail(&sk->sk_write_queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* Specify the length of each IPv6 datagram fragment.
	 * It has to be a multiple of 8.
	 */
	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
				     sizeof(struct frag_hdr)) & ~7;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	ipv6_select_ident(&fhdr, rt);
	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

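/* Editor's note (not in the original source): a worked example of the
 * gso_size computation above.  With mtu = 1500 and fragheaderlen = 40,
 * (1500 - 40 - 8) & ~7 = 1452 & ~7 = 1448, so UFO will cut the oversized
 * UDP datagram into on-wire fragments carrying 1448 payload bytes each --
 * a multiple of 8, as the fragment offset encoding requires.
 */
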
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

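/* Editor's note (not in the original source): the (src->hdrlen + 1) * 8
 * sizing used by both helpers follows RFC 2460, where an extension header's
 * length field counts 8-octet units beyond the first 8 octets; hdrlen == 1
 * therefore means a 16-byte header.
 */
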
static void ip6_append_data_mtu(unsigned int *mtu,
				int *maxfraglen,
				unsigned int fragheaderlen,
				struct sk_buff *skb,
				struct rt6_info *rt,
				unsigned int orig_mtu)
{
	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
		if (skb == NULL) {
			/* first fragment, reserve header_len */
			*mtu = orig_mtu - rt->dst.header_len;

		} else {
			/*
			 * this fragment is not first, the headers
			 * space is regarded as data space.
			 */
			*mtu = orig_mtu;
		}
		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
			      + fragheaderlen - sizeof(struct frag_hdr);
	}
}

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;

	if (flags & MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa */
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

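	/* Editor's note (not in the original source): a worked example of
	 * maxfraglen.  With mtu = 1500 and fragheaderlen = 40 (a bare IPv6
	 * header), ((1500 - 40) & ~7) + 40 - 8 = 1456 + 40 - 8 = 1488: each
	 * queued skb may grow to 1488 bytes so that, after the 8-byte
	 * fragment header is inserted, the 1496-byte fragment still fits the
	 * MTU while carrying the largest 8-byte-aligned payload (1448 bytes).
	 */
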
	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_ignore_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 * Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 * Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);

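/* Editor's note (not in the original source): a minimal sketch of the
 * corking protocol around ip6_append_data().  Successive appends accumulate
 * on sk_write_queue and are emitted as a single datagram when the cork is
 * released:
 *
 *	lock_sock(sk);
 *	err = ip6_append_data(sk, getfrag, from, len, transhdrlen,
 *			      hlimit, tclass, opt, &fl6, rt,
 *			      flags | MSG_MORE, dontfrag);
 *	...more appends...
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else
 *		err = ip6_push_pending_frames(sk);
 *	release_sock(sk);
 */
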
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	skb = __skb_dequeue(&sk->sk_write_queue);
	if (skb == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	skb->ignore_df = ip6_sk_ignore_df(sk);

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
					np->autoflowlabel));
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);