// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to Mark.Bush@prg.ox.ac.uk)
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/ :	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *	Richard Underwood	:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *	Pauline Middelink	:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *	Arnt Gulbrandsen	:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *	Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *	Gerhard Koerting	:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *	Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *	Willy Konynenberg	:	Transparent proxying support
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 */
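
/*
 * Purely as illustration of the "To Fix" note above: RFC 815 reassembles a
 * datagram by tracking the byte ranges ("holes") still missing and shrinking
 * or splitting a hole as each fragment lands.  A minimal sketch under
 * hypothetical types -- this is NOT the kernel's reassembly, which lives in
 * ip_fragment.c / inet_fragment.c:
 */
#if 0	/* illustrative sketch only, not built */
struct hole {
	u16 first, last;	/* missing byte range, inclusive */
	struct hole *next;
};

/* Account for a fragment covering [frag_first, frag_last]; the datagram is
 * complete once *holes becomes NULL.
 */
static void rfc815_note_fragment(struct hole **holes, u16 frag_first,
				 u16 frag_last, bool more_fragments)
{
	struct hole **pp = holes, *h;

	while ((h = *pp) != NULL) {
		if (frag_first > h->last || frag_last < h->first) {
			pp = &h->next;	/* fragment misses this hole */
			continue;
		}
		if (frag_first > h->first) {
			/* fragment leaves a smaller hole on its left */
			struct hole *left = kmalloc(sizeof(*left), GFP_ATOMIC);

			if (!left)
				return;
			left->first = h->first;
			left->last = frag_first - 1;
			left->next = h;
			*pp = left;
			pp = &left->next;
		}
		if (frag_last < h->last && more_fragments) {
			/* fragment leaves a smaller hole on its right */
			h->first = frag_last + 1;
			pp = &h->next;
		} else {
			/* hole fully covered (or beyond the final fragment) */
			*pp = h->next;
			kfree(h);
		}
	}
}
#endif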
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>
/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
	     ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				/* clone for every listener but the last */
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	/* hand the original skb to the last matching socket */
	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}
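
/*
 * For context (not from this file): sockets land on net->ipv4.ra_chain via
 * ip_ra_control(), typically because a process enabled IP_ROUTER_ALERT on a
 * raw socket.  A hedged userspace sketch of such a consumer, e.g. an
 * RSVP-style daemon, with error handling omitted:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 *	// read() now also delivers forwarded datagrams carrying the
 *	// Router Alert option (RFC 2113).
 */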
INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb(skb);
				return;
			}
			nf_reset_ct(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
		if (ret < 0) {
			/* a negative return asks for resubmission under
			 * another protocol number
			 */
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb(skb);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}
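
/*
 * For context: the inet_protos[] slot consulted above is filled at init
 * time with inet_add_protocol().  An abridged sketch of how af_inet.c
 * registers a transport handler (fields trimmed):
 *
 *	static const struct net_protocol tcp_protocol = {
 *		.handler	= tcp_v4_rcv,
 *		.no_policy	= 1,
 *	};
 *
 *	inet_add_protocol(&tcp_protocol, IPPROTO_TCP);
 */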
static int ip_local_deliver_finish(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}
/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_options *opt;
	const struct iphdr *iph;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
	 */
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			goto drop;
	}

	return false;
drop:
	return true;
}
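
/*
 * Note: whether IN_DEV_SOURCE_ROUTE() lets the SRR option through above is
 * per-interface policy, settable from userspace with e.g.
 * "sysctl net.ipv4.conf.all.accept_source_route".
 */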
INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	int (*edemux)(struct sk_buff *skb);
	struct rtable *rt;
	int err;

	if (net->ipv4.sysctl_ip_early_demux &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		const struct net_protocol *ipprot;
		int protocol = iph->protocol;

		ipprot = rcu_dereference(inet_protos[protocol]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
			err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
					      udp_v4_early_demux, skb);
			if (unlikely(err))
				goto drop_error;
			/* must reload iph, skb->head might have changed */
			iph = ip_hdr(skb);
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;

		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST))
			goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}
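
/*
 * For context: l3mdev_ip_rcv() only steals the skb when the ingress device
 * is enslaved to an L3 master (e.g. a VRF created with
 * "ip link add vrf-blue type vrf table 10"); otherwise it returns the skb
 * unchanged and processing continues here.
 */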
/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return NULL;
}
/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}
/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv(&sublist, curr_dev, curr_net);
}