/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
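
/*
 * Editorial note: ip_fast_csum() sums the header as 16-bit words, which
 * is why ->check is cleared first above. A receiver that sums all
 * iph->ihl words of a correctly checksummed header (including ->check)
 * gets 0 back - the RFC 1071 verification property.
 */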
int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL,
		       skb_dst(skb)->dev, dst_output_sk);
}
int __ip_local_out(struct sk_buff *skb)
{
	return __ip_local_out_sk(skb->sk, skb);
}
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output_sk(sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out_sk);
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}
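
/*
 * Editorial note: inet->uc_ttl is -1 unless the application set an
 * explicit IP_TTL, so the common case falls through to the route's hop
 * limit (ultimately the ip_default_ttl sysctl exported above).
 */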
/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(sock_net(sk), skb, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
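
/*
 * Usage note (editorial): this helper builds the header from caller
 * supplied saddr/daddr rather than from socket state, which is why
 * e.g. TCP can use it to answer a connection request before a full
 * socket exists.
 */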
static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res = dst_neigh_output(dst, neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}
static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: locally created skb or seglen is <= mtu */
	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
	    skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
		return ip_finish_output2(sk, skb);

	/* Slowpath - GSO segment length is exceeding the dst MTU.
	 *
	 * This can happen in two cases:
	 * 1) TCP GRO packet, DF bit not set
	 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
	 *    from host network stack.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = ip_fragment(sk, segs, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}
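
/*
 * Editorial note: segmenting in software before ip_fragment() keeps the
 * forwarded DF-less case correct - each software segment is checked
 * against the dst MTU individually instead of handing an oversized GSO
 * packet straight to the driver.
 */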
static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output_sk(sk, skb);
	}
#endif
	if (skb_is_gso(skb))
		return ip_finish_output_gso(sk, skb);

	if (skb->len > ip_skb_dst_mtu(skb))
		return ip_fragment(sk, skb, ip_finish_output2);

	return ip_finish_output2(sk, skb);
}
int ip_mc_output(struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags & RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					sk, newskb, NULL, newskb->dev,
					dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags & RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb,
				NULL, newskb->dev, dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
			    NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
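
/*
 * Editorial example: struct flowi4 lays out saddr immediately before
 * daddr, so the single 8-byte memcpy() above writes both addresses in
 * one go; the BUILD_BUG_ON() turns any future layout change into a
 * compile-time failure.
 */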
/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(sock_net(sk), skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
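
/*
 * Usage note (editorial): connection-oriented transports transmit
 * through this path; TCP, for instance, calls ip_queue_xmit() for
 * every segment, and the route cached in the socket is revalidated via
 * __sk_dst_check() above before each use.
 */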
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
int ip_fragment(struct sock *sk, struct sk_buff *skb,
		int (*output)(struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(skb);
	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset >> 3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(sk, skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}
slow_path:
	/* for offloaded checksums cleanup checksum before fragmentation */
	if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
		goto fail;
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);
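
/*
 * Worked example (editorial, assuming a 1500-byte path MTU and a
 * 20-byte header): the data space per fragment is 1480 bytes and
 * offsets are carried in 8-byte units, so a 4000-byte payload leaves
 * as fragments of 1480, 1480 and 1040 bytes with frag_off values of
 * 0, 185 and 370, IP_MF set on all but the last.
 */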
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (copy_from_iter(to, len, &msg->msg_iter) != len)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* specify the length of each IP datagram fragment */
	skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
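
/*
 * Editorial note: with UDP fragmentation offload the stack queues one
 * oversized skb and defers the split to the device or the GSO layer;
 * gso_size above is the per-fragment payload, mirroring the maxfraglen
 * arithmetic in __ip_append_data().
 */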
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;
	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
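
/*
 * Usage sketch (editorial, modelled on the raw/UDP paths): a sendmsg()
 * implementation typically pairs this with ip_push_pending_frames():
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			     transhdrlen, &ipc, &rt, msg->msg_flags);
 *	if (!err && !(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *
 * so that corked writes accumulate on sk_write_queue until pushed.
 */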
ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags & MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features & NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
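
/*
 * Editorial note: this is the zero-copy sendpage()/sendfile() companion
 * of ip_append_data(); it requires NETIF_F_SG because the page is
 * attached as a paged fragment instead of being copied, which is what
 * the changelog's "sendfile() on UDP works now" refers to.
 */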
static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}
/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
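
/*
 * Editorial note: the queued skbs are chained onto the first one's
 * frag_list above, so the datagram leaves here as one skb; if it is
 * larger than the PMTU and ignore_df allows it, ip_fragment() can later
 * split it back along that same frag_list on its fast path.
 */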
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}
/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}
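
/*
 * Editorial note: unlike ip_append_data(), ip_make_skb() cooks the
 * datagram on a private queue and cork, so an uncorked send can build
 * and transmit a single skb without touching the socket's pending
 * frames state.
 */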
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;

	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	flowi4_init_output(&fl4, arg->bound_dev_if,
			   IP4_REPLY_MARK(net, skb->mark),
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}
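
/*
 * Usage note (editorial): TCP uses this on a per-cpu control socket to
 * emit RSTs and ACKs for packets that match no full socket, which is
 * why the reply steals its addresses and flow keys from the packet
 * being answered.
 */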
void __init ip_init(void)
{
	ip_rt_init();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}