/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case a packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	/* The check field must be zero while the sum is computed. */
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
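/*
 * For reference, ip_fast_csum() is an arch-optimized RFC 1071 ones'
 * complement sum over the 'ihl' 32-bit words of the header.  A portable
 * sketch of the same computation (illustrative only, not used anywhere
 * in this file):
 */
#if 0	/* example only */
static u16 ip_ref_csum(const u16 *hdr, unsigned int ihl)
{
	u32 sum = 0;
	unsigned int i;

	/* ihl counts 32-bit words, i.e. 2*ihl 16-bit words */
	for (i = 0; i < ihl * 2; i++)
		sum += hdr[i];
	/* fold the carries back into the low 16 bits, twice */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}
#endif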
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	/* uc_ttl is negative unless the user set IP_TTL; fall back to the route metric. */
	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
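/*
 * Note on ip_build_and_send_pkt(): the caller must already have attached
 * a route to skb->dst; the addresses on the wire come from rt_src/rt_dst,
 * and daddr is only consulted when building IP options.  TCP, for
 * example, uses this path for SYN-ACK packets built outside the normal
 * socket send path.
 */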
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) &&
	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; ip_mr_input will drop
		   them anyway.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	/* One 16-bit store sets version (4), ihl (5) and tos at once. */
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->tso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
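/*
 * Usage sketch (illustrative, not part of this file): a connection-
 * oriented transport hands ip_queue_xmit() an skb whose transport header
 * is already filled in; the routing decision is cached on the socket
 * between calls.  TCP's transmit path does essentially:
 *
 *	skb->h.th = th;			// transport header already built
 *	err = ip_queue_xmit(skb, 0);	// 0: honour path MTU, may set DF
 *	if (err > 0)
 *		err = net_xmit_errno(err);
 */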
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
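/*
 * Worked example of the offset arithmetic above (illustrative): with a
 * 1500-byte MTU and a 20-byte header, the data space is 1480 bytes,
 * already a multiple of 8, so successive fragments carry data offsets
 * 0, 1480, 2960, ... encoded in iph->frag_off as offset >> 3
 * (i.e. 0, 185, 370, ...), with IP_MF set on every fragment but the last.
 */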
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing the complete
	 * udp datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);
	return err;
}
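/*
 * Note: with NETIF_F_UFO the whole datagram stays in one skb; the device
 * (or its driver) splits it into wire-sized IP fragments on transmit,
 * using skb_shinfo(skb)->ufo_size as the per-fragment payload length.
 */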
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP: other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skbs;
	 * each segment is an IP fragment ready for sending to the network
	 * after adding an appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
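/*
 * Usage sketch (illustrative, not part of this file): a datagram send
 * path typically cork-appends and then pushes, roughly:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, rt,
 *			     corked ? (MSG_MORE | flags) : flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip_push_pending_frames(sk);
 *
 * This mirrors how udp_sendmsg() drives the interface.
 */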
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO))
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);

	while (size > 0) {
		int i;

		if (skb_shinfo(skb)->ufo_size)
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence spinlock.
	   Note that it relies on the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
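/*
 * For context: the TCP code builds an ip_reply_arg whose iov points at a
 * prebuilt TCP header (e.g. an RST), with csum/csumoffset describing where
 * the checksum must be patched in; ip_send_reply() then routes back toward
 * the source of 'skb' and transmits it.
 */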
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);