/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
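
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that rewrites any header field by hand must refold the
 * checksum, e.g. after lowering the TTL:
 *
 *	iph->ttl--;
 *	ip_send_check(iph);
 *
 * ip_send_check() zeroes iph->check and refolds the 16-bit ones'
 * complement sum over the iph->ihl 32-bit header words (RFC 1071).
 * The forwarding path avoids the full recomputation by adjusting the
 * checksum incrementally via ip_decrease_ttl() in <net/ip.h>.
 */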
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
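
/*
 * Editor's note: nf_hook() returns 1 when the LOCAL_OUT hook chain
 * accepts the packet without stealing or queueing it; only then does
 * ip_local_out() invoke dst_output() itself. Any other return value
 * means netfilter consumed the skb or an error occurred, and is passed
 * through to the caller unchanged.
 */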
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
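
/*
 * Editor's note: this is the "route already known" fast path; TCP uses
 * it to emit SYN-ACKs for embryonic connections, where the route was
 * derived from the incoming SYN. The saddr argument is unused here and
 * daddr is consulted only by ip_options_build(); the header addresses
 * themselves come from the route (rt->rt_src / rt->rt_dst).
 */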
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
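
/*
 * Editor's note: with IP_PMTUDISC_PROBE the socket deliberately sizes
 * packets against the device MTU rather than the (possibly smaller)
 * cached path MTU, so a shrunken-PMTU state can be re-probed; for
 * everyone else the route's dst_mtu() is authoritative.
 */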
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback non-local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
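
/*
 * Editor's note: the NF_HOOK_COND() condition skips the POST_ROUTING
 * chain for skbs flagged IPSKB_REROUTED -- those already traversed it
 * once before ip_finish_output() bounced them back through dst_output()
 * for the post-SNAT policy reroute above.
 */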
int ip_queue_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
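
/*
 * Worked example (editor's illustration): the combined store above,
 *
 *	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
 *
 * writes version, IHL and TOS in one go: (4 << 12) | (5 << 8) = 0x4500,
 * i.e. version 4 in the top nibble and a 5-word (20-byte) header in the
 * next, with TOS in the low byte. Options, if present, grow iph->ihl
 * just below.
 */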
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In such a case, fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copy when we see the first bad
	 * fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			truesizes += frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);
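
/*
 * Worked example (editor's illustration): on a 1500-byte MTU with a
 * 20-byte header, mtu = 1480 bytes of payload per fragment. Fragment
 * offsets travel in 8-byte units (offset >> 3), which is why every
 * non-final fragment is trimmed to a multiple of 8 (len &= ~7). A
 * 4000-byte payload therefore leaves as 1480 + 1480 + 1040 bytes, and
 * only the first fragment keeps the full set of IP options (see
 * ip_options_fragment() above).
 */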
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * create a single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
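
/*
 * Editor's note: with UFO the stack builds one oversized UDP datagram
 * and tells the device how to slice it: gso_size = mtu - fragheaderlen
 * is the per-fragment payload, so the hardware (or the software GSO
 * fallback) emits ready-made, correctly offset IP fragments.
 */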
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal reference to this route, caller should not release it
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->dst.dev->mtu :
					    dst_mtu(rt->dst.path);
		inet->cork.dst = &rt->dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(&sk->sk_write_queue);

	inet->cork.length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * once an appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->shtx.flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			*skb_tx(skb) = ipc->shtx;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
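
/*
 * Usage sketch (editor's illustration, hypothetical caller, error
 * handling elided): the UDP sendmsg path drives the corking API
 * roughly like this:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     corkreq ? msg->msg_flags | MSG_MORE
 *				     : msg->msg_flags);
 *	if (!corkreq)
 *		err = udp_push_pending_frames(sk);
 *
 * udp_push_pending_frames() fills in the UDP header and checksum, then
 * hands the assembled queue to ip_push_pending_frames() below.
 */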
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
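
/*
 * Editor's note: this is the zero-copy companion to ip_append_data();
 * udp_sendpage() (and hence sendfile()/splice() on a UDP socket) lands
 * here, gluing page fragments onto the pending queue instead of
 * copying -- which is what the "sendfile() on UDP works now" changelog
 * entry above refers to.
 */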
static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.shtx.flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence spinlock.
	   Note that it uses the fact, that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
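
/*
 * Editor's note: the in-tree users at this point are TCP's
 * tcp_v4_send_reset() and tcp_v4_send_ack(), which call this on a
 * kernel control socket with BHs disabled -- which is why the "single
 * threaded per socket" requirement in the comment above holds there.
 */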
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}