/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *		Miquel van Smoorenburg	:	BSD API fixes.
 *		Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *		Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *		Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *		Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *		Robert Olsson	:	Added rt_cache statistics
 *		Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *		Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *		Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *		Ilia Sotnikov	:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>

#include <linux/sysctl.h>
#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);


static struct dst_ops ipv4_dst_ops = {
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
	.entries =		ATOMIC_INIT(0),
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	ECN_OR_COST(BESTEFFORT),
	ECN_OR_COST(INTERACTIVE),
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    resulting handle.
 */

struct rt_hash_bucket {
	struct rtable	*chain;
};
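
/*
 * Illustrative sketch (not part of the build): the read side implied by
 * the scheme above, as used by the lookup loops later in this file.
 * entry_matches() is a hypothetical predicate standing in for the real
 * key comparison.
 *
 *	rcu_read_lock_bh();
 *	for (r = rcu_dereference_bh(rt_hash_table[slot].chain); r;
 *	     r = rcu_dereference_bh(r->u.dst.rt_next)) {
 *		if (entry_matches(r)) {
 *			dst_use(&r->u.dst, jiffies);	/- atomic refcount + use accounting -/
 *			break;
 *		}
 *	}
 *	rcu_read_unlock_bh();
 */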
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
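
/*
 * A brief sketch of the striping above (documentation only, assuming
 * RT_HASH_LOCK_SZ is a power of two): writers serialize per bucket by
 * mapping the bucket index onto the much smaller lock table with a mask,
 * which is exactly what rt_hash_lock_addr() does:
 *
 *	spin_lock_bh(rt_hash_lock_addr(slot));
 *	... modify rt_hash_table[slot].chain ...
 *	spin_unlock_bh(rt_hash_lock_addr(slot));
 *
 * Distinct buckets may share a spinlock, but the memory cost is bounded
 * by RT_HASH_LOCK_SZ instead of by the full hash table size.
 */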
static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
272 #ifdef CONFIG_PROC_FS
273 struct rt_cache_iter_state
{
274 struct seq_net_private p
;
279 static struct rtable
*rt_cache_get_first(struct seq_file
*seq
)
281 struct rt_cache_iter_state
*st
= seq
->private;
282 struct rtable
*r
= NULL
;
284 for (st
->bucket
= rt_hash_mask
; st
->bucket
>= 0; --st
->bucket
) {
285 if (!rt_hash_table
[st
->bucket
].chain
)
288 r
= rcu_dereference_bh(rt_hash_table
[st
->bucket
].chain
);
290 if (dev_net(r
->u
.dst
.dev
) == seq_file_net(seq
) &&
291 r
->rt_genid
== st
->genid
)
293 r
= rcu_dereference_bh(r
->u
.dst
.rt_next
);
295 rcu_read_unlock_bh();
300 static struct rtable
*__rt_cache_get_next(struct seq_file
*seq
,
303 struct rt_cache_iter_state
*st
= seq
->private;
305 r
= r
->u
.dst
.rt_next
;
307 rcu_read_unlock_bh();
309 if (--st
->bucket
< 0)
311 } while (!rt_hash_table
[st
->bucket
].chain
);
313 r
= rt_hash_table
[st
->bucket
].chain
;
315 return rcu_dereference_bh(r
);
318 static struct rtable
*rt_cache_get_next(struct seq_file
*seq
,
321 struct rt_cache_iter_state
*st
= seq
->private;
322 while ((r
= __rt_cache_get_next(seq
, r
)) != NULL
) {
323 if (dev_net(r
->u
.dst
.dev
) != seq_file_net(seq
))
325 if (r
->rt_genid
== st
->genid
)
331 static struct rtable
*rt_cache_get_idx(struct seq_file
*seq
, loff_t pos
)
333 struct rtable
*r
= rt_cache_get_first(seq
);
336 while (pos
&& (r
= rt_cache_get_next(seq
, r
)))
338 return pos
? NULL
: r
;
341 static void *rt_cache_seq_start(struct seq_file
*seq
, loff_t
*pos
)
343 struct rt_cache_iter_state
*st
= seq
->private;
345 return rt_cache_get_idx(seq
, *pos
- 1);
346 st
->genid
= rt_genid(seq_file_net(seq
));
347 return SEQ_START_TOKEN
;
350 static void *rt_cache_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
354 if (v
== SEQ_START_TOKEN
)
355 r
= rt_cache_get_first(seq
);
357 r
= rt_cache_get_next(seq
, v
);
362 static void rt_cache_seq_stop(struct seq_file
*seq
, void *v
)
364 if (v
&& v
!= SEQ_START_TOKEN
)
365 rcu_read_unlock_bh();
368 static int rt_cache_seq_show(struct seq_file
*seq
, void *v
)
370 if (v
== SEQ_START_TOKEN
)
371 seq_printf(seq
, "%-127s\n",
372 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
373 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
376 struct rtable
*r
= v
;
379 seq_printf(seq
, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
380 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
381 r
->u
.dst
.dev
? r
->u
.dst
.dev
->name
: "*",
382 (__force u32
)r
->rt_dst
,
383 (__force u32
)r
->rt_gateway
,
384 r
->rt_flags
, atomic_read(&r
->u
.dst
.__refcnt
),
385 r
->u
.dst
.__use
, 0, (__force u32
)r
->rt_src
,
386 (dst_metric(&r
->u
.dst
, RTAX_ADVMSS
) ?
387 (int)dst_metric(&r
->u
.dst
, RTAX_ADVMSS
) + 40 : 0),
388 dst_metric(&r
->u
.dst
, RTAX_WINDOW
),
389 (int)((dst_metric(&r
->u
.dst
, RTAX_RTT
) >> 3) +
390 dst_metric(&r
->u
.dst
, RTAX_RTTVAR
)),
392 r
->u
.dst
.hh
? atomic_read(&r
->u
.dst
.hh
->hh_refcnt
) : -1,
393 r
->u
.dst
.hh
? (r
->u
.dst
.hh
->hh_output
==
395 r
->rt_spec_dst
, &len
);
397 seq_printf(seq
, "%*s\n", 127 - len
, "");
402 static const struct seq_operations rt_cache_seq_ops
= {
403 .start
= rt_cache_seq_start
,
404 .next
= rt_cache_seq_next
,
405 .stop
= rt_cache_seq_stop
,
406 .show
= rt_cache_seq_show
,
409 static int rt_cache_seq_open(struct inode
*inode
, struct file
*file
)
411 return seq_open_net(inode
, file
, &rt_cache_seq_ops
,
412 sizeof(struct rt_cache_iter_state
));
415 static const struct file_operations rt_cache_seq_fops
= {
416 .owner
= THIS_MODULE
,
417 .open
= rt_cache_seq_open
,
420 .release
= seq_release_net
,
424 static void *rt_cpu_seq_start(struct seq_file
*seq
, loff_t
*pos
)
429 return SEQ_START_TOKEN
;
431 for (cpu
= *pos
-1; cpu
< nr_cpu_ids
; ++cpu
) {
432 if (!cpu_possible(cpu
))
435 return &per_cpu(rt_cache_stat
, cpu
);
440 static void *rt_cpu_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
444 for (cpu
= *pos
; cpu
< nr_cpu_ids
; ++cpu
) {
445 if (!cpu_possible(cpu
))
448 return &per_cpu(rt_cache_stat
, cpu
);
454 static void rt_cpu_seq_stop(struct seq_file
*seq
, void *v
)
459 static int rt_cpu_seq_show(struct seq_file
*seq
, void *v
)
461 struct rt_cache_stat
*st
= v
;
463 if (v
== SEQ_START_TOKEN
) {
464 seq_printf(seq
, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
468 seq_printf(seq
,"%08x %08x %08x %08x %08x %08x %08x %08x "
469 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
470 atomic_read(&ipv4_dst_ops
.entries
),
493 static const struct seq_operations rt_cpu_seq_ops
= {
494 .start
= rt_cpu_seq_start
,
495 .next
= rt_cpu_seq_next
,
496 .stop
= rt_cpu_seq_stop
,
497 .show
= rt_cpu_seq_show
,
501 static int rt_cpu_seq_open(struct inode
*inode
, struct file
*file
)
503 return seq_open(file
, &rt_cpu_seq_ops
);
506 static const struct file_operations rt_cpu_seq_fops
= {
507 .owner
= THIS_MODULE
,
508 .open
= rt_cpu_seq_open
,
511 .release
= seq_release
,
514 #ifdef CONFIG_NET_CLS_ROUTE
515 static int rt_acct_proc_show(struct seq_file
*m
, void *v
)
517 struct ip_rt_acct
*dst
, *src
;
520 dst
= kcalloc(256, sizeof(struct ip_rt_acct
), GFP_KERNEL
);
524 for_each_possible_cpu(i
) {
525 src
= (struct ip_rt_acct
*)per_cpu_ptr(ip_rt_acct
, i
);
526 for (j
= 0; j
< 256; j
++) {
527 dst
[j
].o_bytes
+= src
[j
].o_bytes
;
528 dst
[j
].o_packets
+= src
[j
].o_packets
;
529 dst
[j
].i_bytes
+= src
[j
].i_bytes
;
530 dst
[j
].i_packets
+= src
[j
].i_packets
;
534 seq_write(m
, dst
, 256 * sizeof(struct ip_rt_acct
));
539 static int rt_acct_proc_open(struct inode
*inode
, struct file
*file
)
541 return single_open(file
, rt_acct_proc_show
, NULL
);
544 static const struct file_operations rt_acct_proc_fops
= {
545 .owner
= THIS_MODULE
,
546 .open
= rt_acct_proc_open
,
549 .release
= single_release
,
553 static int __net_init
ip_rt_do_proc_init(struct net
*net
)
555 struct proc_dir_entry
*pde
;
557 pde
= proc_net_fops_create(net
, "rt_cache", S_IRUGO
,
562 pde
= proc_create("rt_cache", S_IRUGO
,
563 net
->proc_net_stat
, &rt_cpu_seq_fops
);
567 #ifdef CONFIG_NET_CLS_ROUTE
568 pde
= proc_create("rt_acct", 0, net
->proc_net
, &rt_acct_proc_fops
);
574 #ifdef CONFIG_NET_CLS_ROUTE
576 remove_proc_entry("rt_cache", net
->proc_net_stat
);
579 remove_proc_entry("rt_cache", net
->proc_net
);
584 static void __net_exit
ip_rt_do_proc_exit(struct net
*net
)
586 remove_proc_entry("rt_cache", net
->proc_net_stat
);
587 remove_proc_entry("rt_cache", net
->proc_net
);
588 #ifdef CONFIG_NET_CLS_ROUTE
589 remove_proc_entry("rt_acct", net
->proc_net
);
593 static struct pernet_operations ip_rt_proc_ops __net_initdata
= {
594 .init
= ip_rt_do_proc_init
,
595 .exit
= ip_rt_do_proc_exit
,
598 static int __init
ip_rt_proc_init(void)
600 return register_pernet_subsys(&ip_rt_proc_ops
);
604 static inline int ip_rt_proc_init(void)
608 #endif /* CONFIG_PROC_FS */
610 static inline void rt_free(struct rtable
*rt
)
612 call_rcu_bh(&rt
->u
.dst
.rcu_head
, dst_rcu_free
);
615 static inline void rt_drop(struct rtable
*rt
)
618 call_rcu_bh(&rt
->u
.dst
.rcu_head
, dst_rcu_free
);
621 static inline int rt_fast_clean(struct rtable
*rth
)
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
625 return (rth
->rt_flags
& (RTCF_BROADCAST
| RTCF_MULTICAST
)) &&
626 rth
->fl
.iif
&& rth
->u
.dst
.rt_next
;
629 static inline int rt_valuable(struct rtable
*rth
)
631 return (rth
->rt_flags
& (RTCF_REDIRECTED
| RTCF_NOTIFY
)) ||
635 static int rt_may_expire(struct rtable
*rth
, unsigned long tmo1
, unsigned long tmo2
)
640 if (atomic_read(&rth
->u
.dst
.__refcnt
))
644 if (rth
->u
.dst
.expires
&&
645 time_after_eq(jiffies
, rth
->u
.dst
.expires
))
648 age
= jiffies
- rth
->u
.dst
.lastuse
;
650 if ((age
<= tmo1
&& !rt_fast_clean(rth
)) ||
651 (age
<= tmo2
&& rt_valuable(rth
)))
/* Bits of score are:
 * 30: not quite useless
 * 29..0: usage counter
 */
662 static inline u32
rt_score(struct rtable
*rt
)
664 u32 score
= jiffies
- rt
->u
.dst
.lastuse
;
666 score
= ~score
& ~(3<<30);
672 !(rt
->rt_flags
& (RTCF_BROADCAST
|RTCF_MULTICAST
|RTCF_LOCAL
)))
678 static inline bool rt_caching(const struct net
*net
)
680 return net
->ipv4
.current_rt_cache_rebuild_count
<=
681 net
->ipv4
.sysctl_rt_cache_rebuild_count
;
684 static inline bool compare_hash_inputs(const struct flowi
*fl1
,
685 const struct flowi
*fl2
)
687 return ((((__force u32
)fl1
->nl_u
.ip4_u
.daddr
^ (__force u32
)fl2
->nl_u
.ip4_u
.daddr
) |
688 ((__force u32
)fl1
->nl_u
.ip4_u
.saddr
^ (__force u32
)fl2
->nl_u
.ip4_u
.saddr
) |
689 (fl1
->iif
^ fl2
->iif
)) == 0);
692 static inline int compare_keys(struct flowi
*fl1
, struct flowi
*fl2
)
694 return (((__force u32
)fl1
->nl_u
.ip4_u
.daddr
^ (__force u32
)fl2
->nl_u
.ip4_u
.daddr
) |
695 ((__force u32
)fl1
->nl_u
.ip4_u
.saddr
^ (__force u32
)fl2
->nl_u
.ip4_u
.saddr
) |
696 (fl1
->mark
^ fl2
->mark
) |
697 (*(u16
*)&fl1
->nl_u
.ip4_u
.tos
^ *(u16
*)&fl2
->nl_u
.ip4_u
.tos
) |
698 (fl1
->oif
^ fl2
->oif
) |
699 (fl1
->iif
^ fl2
->iif
)) == 0;
702 static inline int compare_netns(struct rtable
*rt1
, struct rtable
*rt2
)
704 return net_eq(dev_net(rt1
->u
.dst
.dev
), dev_net(rt2
->u
.dst
.dev
));
707 static inline int rt_is_expired(struct rtable
*rth
)
709 return rth
->rt_genid
!= rt_genid(dev_net(rth
->u
.dst
.dev
));
/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
717 static void rt_do_flush(int process_context
)
720 struct rtable
*rth
, *next
;
721 struct rtable
* tail
;
723 for (i
= 0; i
<= rt_hash_mask
; i
++) {
724 if (process_context
&& need_resched())
726 rth
= rt_hash_table
[i
].chain
;
730 spin_lock_bh(rt_hash_lock_addr(i
));
733 struct rtable
** prev
, * p
;
735 rth
= rt_hash_table
[i
].chain
;
737 /* defer releasing the head of the list after spin_unlock */
738 for (tail
= rth
; tail
; tail
= tail
->u
.dst
.rt_next
)
739 if (!rt_is_expired(tail
))
742 rt_hash_table
[i
].chain
= tail
;
744 /* call rt_free on entries after the tail requiring flush */
745 prev
= &rt_hash_table
[i
].chain
;
746 for (p
= *prev
; p
; p
= next
) {
747 next
= p
->u
.dst
.rt_next
;
748 if (!rt_is_expired(p
)) {
749 prev
= &p
->u
.dst
.rt_next
;
757 rth
= rt_hash_table
[i
].chain
;
758 rt_hash_table
[i
].chain
= NULL
;
761 spin_unlock_bh(rt_hash_lock_addr(i
));
763 for (; rth
!= tail
; rth
= next
) {
764 next
= rth
->u
.dst
.rt_next
;
/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to get an estimate of rt_chain_length_max:
 *	rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */
#define ONE (1UL << FRACT_BITS)
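
/*
 * Worked example of the fixed-point bookkeeping above (illustrative only,
 * assuming FRACT_BITS == 3, i.e. ONE == 8): if a scan saw chains of
 * length 2, 3 and 7, rt_check_expire() accumulates them scaled by ONE:
 *
 *	sum  = (2 + 3 + 7) * ONE        = 96   -> avg = 96 / 3 = 32                    (4.0)
 *	sum2 = (4 + 9 + 49) * ONE * ONE = 3968 -> sd  = int_sqrt(3968/3 - 32*32) = 17  (~2.1)
 *
 * giving rt_chain_length_max = max(ip_rt_gc_elasticity, (avg + 4*sd) >> FRACT_BITS)
 *                            = max(8, (32 + 68) >> 3) = max(8, 12) = 12.
 */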
/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif).
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
788 static int has_noalias(const struct rtable
*head
, const struct rtable
*rth
)
790 const struct rtable
*aux
= head
;
793 if (compare_hash_inputs(&aux
->fl
, &rth
->fl
))
795 aux
= aux
->u
.dst
.rt_next
;
800 static void rt_check_expire(void)
802 static unsigned int rover
;
803 unsigned int i
= rover
, goal
;
804 struct rtable
*rth
, **rthp
;
805 unsigned long samples
= 0;
806 unsigned long sum
= 0, sum2
= 0;
810 delta
= jiffies
- expires_ljiffies
;
811 expires_ljiffies
= jiffies
;
812 mult
= ((u64
)delta
) << rt_hash_log
;
813 if (ip_rt_gc_timeout
> 1)
814 do_div(mult
, ip_rt_gc_timeout
);
815 goal
= (unsigned int)mult
;
816 if (goal
> rt_hash_mask
)
817 goal
= rt_hash_mask
+ 1;
818 for (; goal
> 0; goal
--) {
819 unsigned long tmo
= ip_rt_gc_timeout
;
820 unsigned long length
;
822 i
= (i
+ 1) & rt_hash_mask
;
823 rthp
= &rt_hash_table
[i
].chain
;
833 spin_lock_bh(rt_hash_lock_addr(i
));
834 while ((rth
= *rthp
) != NULL
) {
835 prefetch(rth
->u
.dst
.rt_next
);
836 if (rt_is_expired(rth
)) {
837 *rthp
= rth
->u
.dst
.rt_next
;
841 if (rth
->u
.dst
.expires
) {
842 /* Entry is expired even if it is in use */
843 if (time_before_eq(jiffies
, rth
->u
.dst
.expires
)) {
846 rthp
= &rth
->u
.dst
.rt_next
;
848 * We only count entries on
849 * a chain with equal hash inputs once
850 * so that entries for different QOS
851 * levels, and other non-hash input
852 * attributes don't unfairly skew
853 * the length computation
855 length
+= has_noalias(rt_hash_table
[i
].chain
, rth
);
858 } else if (!rt_may_expire(rth
, tmo
, ip_rt_gc_timeout
))
861 /* Cleanup aged off entries. */
862 *rthp
= rth
->u
.dst
.rt_next
;
865 spin_unlock_bh(rt_hash_lock_addr(i
));
867 sum2
+= length
*length
;
870 unsigned long avg
= sum
/ samples
;
871 unsigned long sd
= int_sqrt(sum2
/ samples
- avg
*avg
);
872 rt_chain_length_max
= max_t(unsigned long,
874 (avg
+ 4*sd
) >> FRACT_BITS
);
/*
 * rt_worker_func() is run in process context.
 * We call rt_check_expire() to scan part of the hash table.
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
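
/*
 * Sketch of why the bump above invalidates the whole cache lazily
 * (illustrative, not part of the build): every cached rtable records the
 * generation it was created under, and rt_is_expired() simply compares it
 * against the current per-netns value:
 *
 *	rt->rt_genid = rt_genid(net);		at insert time
 *	...
 *	if (rt->rt_genid != rt_genid(net))	after an invalidate
 *		-> entry is stale: skipped by lookups, reaped by gc/flush
 *
 * so no hash-table walk is needed at invalidation time; stale entries are
 * dropped when they are next encountered.
 */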
/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}

/* Flush previous cache invalidated entries from the cache */
void rt_cache_flush_batch(void)
{
	rt_do_flush(!in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}
/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, when the number of aged-off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */
940 static int rt_garbage_collect(struct dst_ops
*ops
)
942 static unsigned long expire
= RT_GC_TIMEOUT
;
943 static unsigned long last_gc
;
945 static int equilibrium
;
946 struct rtable
*rth
, **rthp
;
947 unsigned long now
= jiffies
;
951 * Garbage collection is pretty expensive,
952 * do not make it too frequently.
955 RT_CACHE_STAT_INC(gc_total
);
957 if (now
- last_gc
< ip_rt_gc_min_interval
&&
958 atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
) {
959 RT_CACHE_STAT_INC(gc_ignored
);
963 /* Calculate number of entries, which we want to expire now. */
964 goal
= atomic_read(&ipv4_dst_ops
.entries
) -
965 (ip_rt_gc_elasticity
<< rt_hash_log
);
967 if (equilibrium
< ipv4_dst_ops
.gc_thresh
)
968 equilibrium
= ipv4_dst_ops
.gc_thresh
;
969 goal
= atomic_read(&ipv4_dst_ops
.entries
) - equilibrium
;
971 equilibrium
+= min_t(unsigned int, goal
>> 1, rt_hash_mask
+ 1);
972 goal
= atomic_read(&ipv4_dst_ops
.entries
) - equilibrium
;
975 /* We are in dangerous area. Try to reduce cache really
978 goal
= max_t(unsigned int, goal
>> 1, rt_hash_mask
+ 1);
979 equilibrium
= atomic_read(&ipv4_dst_ops
.entries
) - goal
;
982 if (now
- last_gc
>= ip_rt_gc_min_interval
)
993 for (i
= rt_hash_mask
, k
= rover
; i
>= 0; i
--) {
994 unsigned long tmo
= expire
;
996 k
= (k
+ 1) & rt_hash_mask
;
997 rthp
= &rt_hash_table
[k
].chain
;
998 spin_lock_bh(rt_hash_lock_addr(k
));
999 while ((rth
= *rthp
) != NULL
) {
1000 if (!rt_is_expired(rth
) &&
1001 !rt_may_expire(rth
, tmo
, expire
)) {
1003 rthp
= &rth
->u
.dst
.rt_next
;
1006 *rthp
= rth
->u
.dst
.rt_next
;
1010 spin_unlock_bh(rt_hash_lock_addr(k
));
		/* Goal is not achieved. We stop the process if:

		   - expire was reduced to zero. Otherwise, expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */
1028 RT_CACHE_STAT_INC(gc_goal_miss
);
1034 #if RT_CACHE_DEBUG >= 2
1035 printk(KERN_DEBUG
"expire>> %u %d %d %d\n", expire
,
1036 atomic_read(&ipv4_dst_ops
.entries
), goal
, i
);
1039 if (atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
)
1041 } while (!in_softirq() && time_before_eq(jiffies
, now
));
1043 if (atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
)
1045 if (net_ratelimit())
1046 printk(KERN_WARNING
"dst cache overflow\n");
1047 RT_CACHE_STAT_INC(gc_dst_overflow
);
1051 expire
+= ip_rt_gc_min_interval
;
1052 if (expire
> ip_rt_gc_timeout
||
1053 atomic_read(&ipv4_dst_ops
.entries
) < ipv4_dst_ops
.gc_thresh
)
1054 expire
= ip_rt_gc_timeout
;
1055 #if RT_CACHE_DEBUG >= 2
1056 printk(KERN_DEBUG
"expire++ %u %d %d %d\n", expire
,
1057 atomic_read(&ipv4_dst_ops
.entries
), goal
, rover
);
1063 * Returns number of entries in a hash chain that have different hash_inputs
1065 static int slow_chain_length(const struct rtable
*head
)
1068 const struct rtable
*rth
= head
;
1071 length
+= has_noalias(head
, rth
);
1072 rth
= rth
->u
.dst
.rt_next
;
1074 return length
>> FRACT_BITS
;
1077 static int rt_intern_hash(unsigned hash
, struct rtable
*rt
,
1078 struct rtable
**rp
, struct sk_buff
*skb
, int ifindex
)
1080 struct rtable
*rth
, **rthp
;
1082 struct rtable
*cand
, **candp
;
1085 int attempts
= !in_softirq();
1089 min_score
= ~(u32
)0;
1094 if (!rt_caching(dev_net(rt
->u
.dst
.dev
))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it.
		 * Note also that rt_free uses call_rcu.  We don't actually
		 * need rcu protection here, this is just our path to get
		 * on the route gc list.
		 */
1111 if (rt
->rt_type
== RTN_UNICAST
|| rt
->fl
.iif
== 0) {
1112 int err
= arp_bind_neighbour(&rt
->u
.dst
);
1114 if (net_ratelimit())
1116 "Neighbour table failure & not caching routes.\n");
1126 rthp
= &rt_hash_table
[hash
].chain
;
1128 spin_lock_bh(rt_hash_lock_addr(hash
));
1129 while ((rth
= *rthp
) != NULL
) {
1130 if (rt_is_expired(rth
)) {
1131 *rthp
= rth
->u
.dst
.rt_next
;
1135 if (compare_keys(&rth
->fl
, &rt
->fl
) && compare_netns(rth
, rt
)) {
1137 *rthp
= rth
->u
.dst
.rt_next
;
1139 * Since lookup is lockfree, the deletion
1140 * must be visible to another weakly ordered CPU before
1141 * the insertion at the start of the hash chain.
1143 rcu_assign_pointer(rth
->u
.dst
.rt_next
,
1144 rt_hash_table
[hash
].chain
);
1146 * Since lookup is lockfree, the update writes
1147 * must be ordered for consistency on SMP.
1149 rcu_assign_pointer(rt_hash_table
[hash
].chain
, rth
);
1151 dst_use(&rth
->u
.dst
, now
);
1152 spin_unlock_bh(rt_hash_lock_addr(hash
));
1158 skb_dst_set(skb
, &rth
->u
.dst
);
1162 if (!atomic_read(&rth
->u
.dst
.__refcnt
)) {
1163 u32 score
= rt_score(rth
);
1165 if (score
<= min_score
) {
1174 rthp
= &rth
->u
.dst
.rt_next
;
		/* ip_rt_gc_elasticity used to be the average chain length;
		 * when exceeded, gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
1184 if (chain_length
> ip_rt_gc_elasticity
) {
1185 *candp
= cand
->u
.dst
.rt_next
;
1189 if (chain_length
> rt_chain_length_max
&&
1190 slow_chain_length(rt_hash_table
[hash
].chain
) > rt_chain_length_max
) {
1191 struct net
*net
= dev_net(rt
->u
.dst
.dev
);
1192 int num
= ++net
->ipv4
.current_rt_cache_rebuild_count
;
1193 if (!rt_caching(net
)) {
1194 printk(KERN_WARNING
"%s: %d rebuilds is over limit, route caching disabled\n",
1195 rt
->u
.dst
.dev
->name
, num
);
1197 rt_emergency_hash_rebuild(net
);
1198 spin_unlock_bh(rt_hash_lock_addr(hash
));
1200 hash
= rt_hash(rt
->fl
.fl4_dst
, rt
->fl
.fl4_src
,
1201 ifindex
, rt_genid(net
));
1206 /* Try to bind route to arp only if it is output
1207 route or unicast forwarding path.
1209 if (rt
->rt_type
== RTN_UNICAST
|| rt
->fl
.iif
== 0) {
1210 int err
= arp_bind_neighbour(&rt
->u
.dst
);
1212 spin_unlock_bh(rt_hash_lock_addr(hash
));
1214 if (err
!= -ENOBUFS
) {
1219 /* Neighbour tables are full and nothing
1220 can be released. Try to shrink route cache,
1221 it is most likely it holds some neighbour records.
1223 if (attempts
-- > 0) {
1224 int saved_elasticity
= ip_rt_gc_elasticity
;
1225 int saved_int
= ip_rt_gc_min_interval
;
1226 ip_rt_gc_elasticity
= 1;
1227 ip_rt_gc_min_interval
= 0;
1228 rt_garbage_collect(&ipv4_dst_ops
);
1229 ip_rt_gc_min_interval
= saved_int
;
1230 ip_rt_gc_elasticity
= saved_elasticity
;
1234 if (net_ratelimit())
1235 printk(KERN_WARNING
"Neighbour table overflow.\n");
1241 rt
->u
.dst
.rt_next
= rt_hash_table
[hash
].chain
;
1243 #if RT_CACHE_DEBUG >= 2
1244 if (rt
->u
.dst
.rt_next
) {
1246 printk(KERN_DEBUG
"rt_cache @%02x: %pI4",
1248 for (trt
= rt
->u
.dst
.rt_next
; trt
; trt
= trt
->u
.dst
.rt_next
)
1249 printk(" . %pI4", &trt
->rt_dst
);
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
1258 rcu_assign_pointer(rt_hash_table
[hash
].chain
, rt
);
1260 spin_unlock_bh(rt_hash_lock_addr(hash
));
1266 skb_dst_set(skb
, &rt
->u
.dst
);
1270 void rt_bind_peer(struct rtable
*rt
, int create
)
1272 static DEFINE_SPINLOCK(rt_peer_lock
);
1273 struct inet_peer
*peer
;
1275 peer
= inet_getpeer(rt
->rt_dst
, create
);
1277 spin_lock_bh(&rt_peer_lock
);
1278 if (rt
->peer
== NULL
) {
1282 spin_unlock_bh(&rt_peer_lock
);
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID unique within a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
1294 static void ip_select_fb_ident(struct iphdr
*iph
)
1296 static DEFINE_SPINLOCK(ip_fb_id_lock
);
1297 static u32 ip_fallback_id
;
1300 spin_lock_bh(&ip_fb_id_lock
);
1301 salt
= secure_ip_id((__force __be32
)ip_fallback_id
^ iph
->daddr
);
1302 iph
->id
= htons(salt
& 0xFFFF);
1303 ip_fallback_id
= salt
;
1304 spin_unlock_bh(&ip_fb_id_lock
);
1307 void __ip_select_ident(struct iphdr
*iph
, struct dst_entry
*dst
, int more
)
1309 struct rtable
*rt
= (struct rtable
*) dst
;
1312 if (rt
->peer
== NULL
)
1313 rt_bind_peer(rt
, 1);
1315 /* If peer is attached to destination, it is never detached,
1316 so that we need not to grab a lock to dereference it.
1319 iph
->id
= htons(inet_getid(rt
->peer
, more
));
1323 printk(KERN_DEBUG
"rt_bind_peer(0) @%p\n",
1324 __builtin_return_address(0));
1326 ip_select_fb_ident(iph
);
1329 static void rt_del(unsigned hash
, struct rtable
*rt
)
1331 struct rtable
**rthp
, *aux
;
1333 rthp
= &rt_hash_table
[hash
].chain
;
1334 spin_lock_bh(rt_hash_lock_addr(hash
));
1336 while ((aux
= *rthp
) != NULL
) {
1337 if (aux
== rt
|| rt_is_expired(aux
)) {
1338 *rthp
= aux
->u
.dst
.rt_next
;
1342 rthp
= &aux
->u
.dst
.rt_next
;
1344 spin_unlock_bh(rt_hash_lock_addr(hash
));
1347 void ip_rt_redirect(__be32 old_gw
, __be32 daddr
, __be32 new_gw
,
1348 __be32 saddr
, struct net_device
*dev
)
1351 struct in_device
*in_dev
= in_dev_get(dev
);
1352 struct rtable
*rth
, **rthp
;
1353 __be32 skeys
[2] = { saddr
, 0 };
1354 int ikeys
[2] = { dev
->ifindex
, 0 };
1355 struct netevent_redirect netevent
;
1362 if (new_gw
== old_gw
|| !IN_DEV_RX_REDIRECTS(in_dev
) ||
1363 ipv4_is_multicast(new_gw
) || ipv4_is_lbcast(new_gw
) ||
1364 ipv4_is_zeronet(new_gw
))
1365 goto reject_redirect
;
1367 if (!rt_caching(net
))
1368 goto reject_redirect
;
1370 if (!IN_DEV_SHARED_MEDIA(in_dev
)) {
1371 if (!inet_addr_onlink(in_dev
, new_gw
, old_gw
))
1372 goto reject_redirect
;
1373 if (IN_DEV_SEC_REDIRECTS(in_dev
) && ip_fib_check_default(new_gw
, dev
))
1374 goto reject_redirect
;
1376 if (inet_addr_type(net
, new_gw
) != RTN_UNICAST
)
1377 goto reject_redirect
;
1380 for (i
= 0; i
< 2; i
++) {
1381 for (k
= 0; k
< 2; k
++) {
1382 unsigned hash
= rt_hash(daddr
, skeys
[i
], ikeys
[k
],
1385 rthp
=&rt_hash_table
[hash
].chain
;
1388 while ((rth
= rcu_dereference(*rthp
)) != NULL
) {
1391 if (rth
->fl
.fl4_dst
!= daddr
||
1392 rth
->fl
.fl4_src
!= skeys
[i
] ||
1393 rth
->fl
.oif
!= ikeys
[k
] ||
1395 rt_is_expired(rth
) ||
1396 !net_eq(dev_net(rth
->u
.dst
.dev
), net
)) {
1397 rthp
= &rth
->u
.dst
.rt_next
;
1401 if (rth
->rt_dst
!= daddr
||
1402 rth
->rt_src
!= saddr
||
1404 rth
->rt_gateway
!= old_gw
||
1405 rth
->u
.dst
.dev
!= dev
)
1408 dst_hold(&rth
->u
.dst
);
1411 rt
= dst_alloc(&ipv4_dst_ops
);
1418 /* Copy all the information. */
1420 rt
->u
.dst
.__use
= 1;
1421 atomic_set(&rt
->u
.dst
.__refcnt
, 1);
1422 rt
->u
.dst
.child
= NULL
;
1424 dev_hold(rt
->u
.dst
.dev
);
1426 in_dev_hold(rt
->idev
);
1427 rt
->u
.dst
.obsolete
= -1;
1428 rt
->u
.dst
.lastuse
= jiffies
;
1429 rt
->u
.dst
.path
= &rt
->u
.dst
;
1430 rt
->u
.dst
.neighbour
= NULL
;
1431 rt
->u
.dst
.hh
= NULL
;
1433 rt
->u
.dst
.xfrm
= NULL
;
1435 rt
->rt_genid
= rt_genid(net
);
1436 rt
->rt_flags
|= RTCF_REDIRECTED
;
1438 /* Gateway is different ... */
1439 rt
->rt_gateway
= new_gw
;
1441 /* Redirect received -> path was valid */
1442 dst_confirm(&rth
->u
.dst
);
1445 atomic_inc(&rt
->peer
->refcnt
);
1447 if (arp_bind_neighbour(&rt
->u
.dst
) ||
1448 !(rt
->u
.dst
.neighbour
->nud_state
&
1450 if (rt
->u
.dst
.neighbour
)
1451 neigh_event_send(rt
->u
.dst
.neighbour
, NULL
);
1457 netevent
.old
= &rth
->u
.dst
;
1458 netevent
.new = &rt
->u
.dst
;
1459 call_netevent_notifiers(NETEVENT_REDIRECT
,
1463 if (!rt_intern_hash(hash
, rt
, &rt
, NULL
, rt
->fl
.oif
))
1476 #ifdef CONFIG_IP_ROUTE_VERBOSE
1477 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit())
1478 printk(KERN_INFO
"Redirect from %pI4 on %s about %pI4 ignored.\n"
1479 " Advised path = %pI4 -> %pI4\n",
1480 &old_gw
, dev
->name
, &new_gw
,
1486 static struct dst_entry
*ipv4_negative_advice(struct dst_entry
*dst
)
1488 struct rtable
*rt
= (struct rtable
*)dst
;
1489 struct dst_entry
*ret
= dst
;
1492 if (dst
->obsolete
> 0) {
1495 } else if ((rt
->rt_flags
& RTCF_REDIRECTED
) ||
1496 (rt
->u
.dst
.expires
&&
1497 time_after_eq(jiffies
, rt
->u
.dst
.expires
))) {
1498 unsigned hash
= rt_hash(rt
->fl
.fl4_dst
, rt
->fl
.fl4_src
,
1500 rt_genid(dev_net(dst
->dev
)));
1501 #if RT_CACHE_DEBUG >= 1
1502 printk(KERN_DEBUG
"ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1503 &rt
->rt_dst
, rt
->fl
.fl4_tos
);
/*
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
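
/*
 * Sketch of the backoff arithmetic used by ip_rt_send_redirect() below
 * (illustrative, assuming the default tunables ip_rt_redirect_load = HZ/50
 * and ip_rt_redirect_number = 9): another redirect is sent only when
 *
 *	time_after(jiffies, rate_last + (ip_rt_redirect_load << rate_tokens))
 *
 * so the required gap doubles with every redirect sent
 * (HZ/50, 2*HZ/50, 4*HZ/50, ...).  Once rate_tokens reaches
 * ip_rt_redirect_number no redirects are sent at all, until
 * ip_rt_redirect_silence of quiet time resets rate_tokens to 0.
 */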
1528 void ip_rt_send_redirect(struct sk_buff
*skb
)
1530 struct rtable
*rt
= skb_rtable(skb
);
1531 struct in_device
*in_dev
;
1535 in_dev
= __in_dev_get_rcu(rt
->u
.dst
.dev
);
1536 if (!in_dev
|| !IN_DEV_TX_REDIRECTS(in_dev
)) {
1540 log_martians
= IN_DEV_LOG_MARTIANS(in_dev
);
1543 /* No redirected packets during ip_rt_redirect_silence;
1544 * reset the algorithm.
1546 if (time_after(jiffies
, rt
->u
.dst
.rate_last
+ ip_rt_redirect_silence
))
1547 rt
->u
.dst
.rate_tokens
= 0;
1549 /* Too many ignored redirects; do not send anything
1550 * set u.dst.rate_last to the last seen redirected packet.
1552 if (rt
->u
.dst
.rate_tokens
>= ip_rt_redirect_number
) {
1553 rt
->u
.dst
.rate_last
= jiffies
;
1557 /* Check for load limit; set rate_last to the latest sent
1560 if (rt
->u
.dst
.rate_tokens
== 0 ||
1562 (rt
->u
.dst
.rate_last
+
1563 (ip_rt_redirect_load
<< rt
->u
.dst
.rate_tokens
)))) {
1564 icmp_send(skb
, ICMP_REDIRECT
, ICMP_REDIR_HOST
, rt
->rt_gateway
);
1565 rt
->u
.dst
.rate_last
= jiffies
;
1566 ++rt
->u
.dst
.rate_tokens
;
1567 #ifdef CONFIG_IP_ROUTE_VERBOSE
1569 rt
->u
.dst
.rate_tokens
== ip_rt_redirect_number
&&
1571 printk(KERN_WARNING
"host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1572 &rt
->rt_src
, rt
->rt_iif
,
1573 &rt
->rt_dst
, &rt
->rt_gateway
);
1578 static int ip_error(struct sk_buff
*skb
)
1580 struct rtable
*rt
= skb_rtable(skb
);
1584 switch (rt
->u
.dst
.error
) {
1589 code
= ICMP_HOST_UNREACH
;
1592 code
= ICMP_NET_UNREACH
;
1593 IP_INC_STATS_BH(dev_net(rt
->u
.dst
.dev
),
1594 IPSTATS_MIB_INNOROUTES
);
1597 code
= ICMP_PKT_FILTERED
;
1602 rt
->u
.dst
.rate_tokens
+= now
- rt
->u
.dst
.rate_last
;
1603 if (rt
->u
.dst
.rate_tokens
> ip_rt_error_burst
)
1604 rt
->u
.dst
.rate_tokens
= ip_rt_error_burst
;
1605 rt
->u
.dst
.rate_last
= now
;
1606 if (rt
->u
.dst
.rate_tokens
>= ip_rt_error_cost
) {
1607 rt
->u
.dst
.rate_tokens
-= ip_rt_error_cost
;
1608 icmp_send(skb
, ICMP_DEST_UNREACH
, code
, 0);
1611 out
: kfree_skb(skb
);
/*
 * The last two values are not from the RFC but
 * are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];

	return 68;
}
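
/*
 * Example (illustrative): when an ICMP "fragmentation needed" carries no
 * usable next-hop MTU, ip_rt_frag_needed() below falls back to guessing
 * from the length of the original datagram, so guess_mtu() returns the
 * largest plateau value strictly smaller than old_mtu, e.g.
 *
 *	guess_mtu(1500) == 1492
 *	guess_mtu(1006) == 576
 *	guess_mtu(296)  == 216
 */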
1633 unsigned short ip_rt_frag_needed(struct net
*net
, struct iphdr
*iph
,
1634 unsigned short new_mtu
,
1635 struct net_device
*dev
)
1638 unsigned short old_mtu
= ntohs(iph
->tot_len
);
1640 int ikeys
[2] = { dev
->ifindex
, 0 };
1641 __be32 skeys
[2] = { iph
->saddr
, 0, };
1642 __be32 daddr
= iph
->daddr
;
1643 unsigned short est_mtu
= 0;
1645 for (k
= 0; k
< 2; k
++) {
1646 for (i
= 0; i
< 2; i
++) {
1647 unsigned hash
= rt_hash(daddr
, skeys
[i
], ikeys
[k
],
1651 for (rth
= rcu_dereference(rt_hash_table
[hash
].chain
); rth
;
1652 rth
= rcu_dereference(rth
->u
.dst
.rt_next
)) {
1653 unsigned short mtu
= new_mtu
;
1655 if (rth
->fl
.fl4_dst
!= daddr
||
1656 rth
->fl
.fl4_src
!= skeys
[i
] ||
1657 rth
->rt_dst
!= daddr
||
1658 rth
->rt_src
!= iph
->saddr
||
1659 rth
->fl
.oif
!= ikeys
[k
] ||
1661 dst_metric_locked(&rth
->u
.dst
, RTAX_MTU
) ||
1662 !net_eq(dev_net(rth
->u
.dst
.dev
), net
) ||
1666 if (new_mtu
< 68 || new_mtu
>= old_mtu
) {
1668 /* BSD 4.2 compatibility hack :-( */
1670 old_mtu
>= dst_mtu(&rth
->u
.dst
) &&
1671 old_mtu
>= 68 + (iph
->ihl
<< 2))
1672 old_mtu
-= iph
->ihl
<< 2;
1674 mtu
= guess_mtu(old_mtu
);
1676 if (mtu
<= dst_mtu(&rth
->u
.dst
)) {
1677 if (mtu
< dst_mtu(&rth
->u
.dst
)) {
1678 dst_confirm(&rth
->u
.dst
);
1679 if (mtu
< ip_rt_min_pmtu
) {
1680 mtu
= ip_rt_min_pmtu
;
1681 rth
->u
.dst
.metrics
[RTAX_LOCK
-1] |=
1684 rth
->u
.dst
.metrics
[RTAX_MTU
-1] = mtu
;
1685 dst_set_expires(&rth
->u
.dst
,
1694 return est_mtu
? : new_mtu
;
1697 static void ip_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
)
1699 if (dst_mtu(dst
) > mtu
&& mtu
>= 68 &&
1700 !(dst_metric_locked(dst
, RTAX_MTU
))) {
1701 if (mtu
< ip_rt_min_pmtu
) {
1702 mtu
= ip_rt_min_pmtu
;
1703 dst
->metrics
[RTAX_LOCK
-1] |= (1 << RTAX_MTU
);
1705 dst
->metrics
[RTAX_MTU
-1] = mtu
;
1706 dst_set_expires(dst
, ip_rt_mtu_expires
);
1707 call_netevent_notifiers(NETEVENT_PMTU_UPDATE
, dst
);
1711 static struct dst_entry
*ipv4_dst_check(struct dst_entry
*dst
, u32 cookie
)
1713 if (rt_is_expired((struct rtable
*)dst
))
1718 static void ipv4_dst_destroy(struct dst_entry
*dst
)
1720 struct rtable
*rt
= (struct rtable
*) dst
;
1721 struct inet_peer
*peer
= rt
->peer
;
1722 struct in_device
*idev
= rt
->idev
;
1735 static void ipv4_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
1738 struct rtable
*rt
= (struct rtable
*) dst
;
1739 struct in_device
*idev
= rt
->idev
;
1740 if (dev
!= dev_net(dev
)->loopback_dev
&& idev
&& idev
->dev
== dev
) {
1741 struct in_device
*loopback_idev
=
1742 in_dev_get(dev_net(dev
)->loopback_dev
);
1743 if (loopback_idev
) {
1744 rt
->idev
= loopback_idev
;
1750 static void ipv4_link_failure(struct sk_buff
*skb
)
1754 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_HOST_UNREACH
, 0);
1756 rt
= skb_rtable(skb
);
1758 dst_set_expires(&rt
->u
.dst
, 0);
1761 static int ip_rt_bug(struct sk_buff
*skb
)
1763 printk(KERN_DEBUG
"ip_rt_bug: %pI4 -> %pI4, %s\n",
1764 &ip_hdr(skb
)->saddr
, &ip_hdr(skb
)->daddr
,
1765 skb
->dev
? skb
->dev
->name
: "?");
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
1779 void ip_rt_get_source(u8
*addr
, struct rtable
*rt
)
1782 struct fib_result res
;
1784 if (rt
->fl
.iif
== 0)
1786 else if (fib_lookup(dev_net(rt
->u
.dst
.dev
), &rt
->fl
, &res
) == 0) {
1787 src
= FIB_RES_PREFSRC(res
);
1790 src
= inet_select_addr(rt
->u
.dst
.dev
, rt
->rt_gateway
,
1792 memcpy(addr
, &src
, 4);
1795 #ifdef CONFIG_NET_CLS_ROUTE
1796 static void set_class_tag(struct rtable
*rt
, u32 tag
)
1798 if (!(rt
->u
.dst
.tclassid
& 0xFFFF))
1799 rt
->u
.dst
.tclassid
|= tag
& 0xFFFF;
1800 if (!(rt
->u
.dst
.tclassid
& 0xFFFF0000))
1801 rt
->u
.dst
.tclassid
|= tag
& 0xFFFF0000;
1805 static void rt_set_nexthop(struct rtable
*rt
, struct fib_result
*res
, u32 itag
)
1807 struct fib_info
*fi
= res
->fi
;
1810 if (FIB_RES_GW(*res
) &&
1811 FIB_RES_NH(*res
).nh_scope
== RT_SCOPE_LINK
)
1812 rt
->rt_gateway
= FIB_RES_GW(*res
);
1813 memcpy(rt
->u
.dst
.metrics
, fi
->fib_metrics
,
1814 sizeof(rt
->u
.dst
.metrics
));
1815 if (fi
->fib_mtu
== 0) {
1816 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = rt
->u
.dst
.dev
->mtu
;
1817 if (dst_metric_locked(&rt
->u
.dst
, RTAX_MTU
) &&
1818 rt
->rt_gateway
!= rt
->rt_dst
&&
1819 rt
->u
.dst
.dev
->mtu
> 576)
1820 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = 576;
1822 #ifdef CONFIG_NET_CLS_ROUTE
1823 rt
->u
.dst
.tclassid
= FIB_RES_NH(*res
).nh_tclassid
;
1826 rt
->u
.dst
.metrics
[RTAX_MTU
-1]= rt
->u
.dst
.dev
->mtu
;
1828 if (dst_metric(&rt
->u
.dst
, RTAX_HOPLIMIT
) == 0)
1829 rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] = sysctl_ip_default_ttl
;
1830 if (dst_mtu(&rt
->u
.dst
) > IP_MAX_MTU
)
1831 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = IP_MAX_MTU
;
1832 if (dst_metric(&rt
->u
.dst
, RTAX_ADVMSS
) == 0)
1833 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = max_t(unsigned int, rt
->u
.dst
.dev
->mtu
- 40,
1835 if (dst_metric(&rt
->u
.dst
, RTAX_ADVMSS
) > 65535 - 40)
1836 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = 65535 - 40;
1838 #ifdef CONFIG_NET_CLS_ROUTE
1839 #ifdef CONFIG_IP_MULTIPLE_TABLES
1840 set_class_tag(rt
, fib_rules_tclass(res
));
1842 set_class_tag(rt
, itag
);
1844 rt
->rt_type
= res
->type
;
1847 static int ip_route_input_mc(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
1848 u8 tos
, struct net_device
*dev
, int our
)
1853 struct in_device
*in_dev
= in_dev_get(dev
);
1856 /* Primary sanity checks. */
1861 if (ipv4_is_multicast(saddr
) || ipv4_is_lbcast(saddr
) ||
1862 ipv4_is_loopback(saddr
) || skb
->protocol
!= htons(ETH_P_IP
))
1865 if (ipv4_is_zeronet(saddr
)) {
1866 if (!ipv4_is_local_multicast(daddr
))
1868 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_LINK
);
1869 } else if (fib_validate_source(saddr
, 0, tos
, 0,
1870 dev
, &spec_dst
, &itag
, 0) < 0)
1873 rth
= dst_alloc(&ipv4_dst_ops
);
1877 rth
->u
.dst
.output
= ip_rt_bug
;
1878 rth
->u
.dst
.obsolete
= -1;
1880 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
1881 rth
->u
.dst
.flags
= DST_HOST
;
1882 if (IN_DEV_CONF_GET(in_dev
, NOPOLICY
))
1883 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
1884 rth
->fl
.fl4_dst
= daddr
;
1885 rth
->rt_dst
= daddr
;
1886 rth
->fl
.fl4_tos
= tos
;
1887 rth
->fl
.mark
= skb
->mark
;
1888 rth
->fl
.fl4_src
= saddr
;
1889 rth
->rt_src
= saddr
;
1890 #ifdef CONFIG_NET_CLS_ROUTE
1891 rth
->u
.dst
.tclassid
= itag
;
1894 rth
->fl
.iif
= dev
->ifindex
;
1895 rth
->u
.dst
.dev
= init_net
.loopback_dev
;
1896 dev_hold(rth
->u
.dst
.dev
);
1897 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
1899 rth
->rt_gateway
= daddr
;
1900 rth
->rt_spec_dst
= spec_dst
;
1901 rth
->rt_genid
= rt_genid(dev_net(dev
));
1902 rth
->rt_flags
= RTCF_MULTICAST
;
1903 rth
->rt_type
= RTN_MULTICAST
;
1905 rth
->u
.dst
.input
= ip_local_deliver
;
1906 rth
->rt_flags
|= RTCF_LOCAL
;
1909 #ifdef CONFIG_IP_MROUTE
1910 if (!ipv4_is_local_multicast(daddr
) && IN_DEV_MFORWARD(in_dev
))
1911 rth
->u
.dst
.input
= ip_mr_input
;
1913 RT_CACHE_STAT_INC(in_slow_mc
);
1916 hash
= rt_hash(daddr
, saddr
, dev
->ifindex
, rt_genid(dev_net(dev
)));
1917 return rt_intern_hash(hash
, rth
, NULL
, skb
, dev
->ifindex
);
1929 static void ip_handle_martian_source(struct net_device
*dev
,
1930 struct in_device
*in_dev
,
1931 struct sk_buff
*skb
,
1935 RT_CACHE_STAT_INC(in_martian_src
);
1936 #ifdef CONFIG_IP_ROUTE_VERBOSE
1937 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit()) {
1939 * RFC1812 recommendation, if source is martian,
1940 * the only hint is MAC header.
1942 printk(KERN_WARNING
"martian source %pI4 from %pI4, on dev %s\n",
1943 &daddr
, &saddr
, dev
->name
);
1944 if (dev
->hard_header_len
&& skb_mac_header_was_set(skb
)) {
1946 const unsigned char *p
= skb_mac_header(skb
);
1947 printk(KERN_WARNING
"ll header: ");
1948 for (i
= 0; i
< dev
->hard_header_len
; i
++, p
++) {
1950 if (i
< (dev
->hard_header_len
- 1))
1959 static int __mkroute_input(struct sk_buff
*skb
,
1960 struct fib_result
*res
,
1961 struct in_device
*in_dev
,
1962 __be32 daddr
, __be32 saddr
, u32 tos
,
1963 struct rtable
**result
)
1968 struct in_device
*out_dev
;
1973 /* get a working reference to the output device */
1974 out_dev
= in_dev_get(FIB_RES_DEV(*res
));
1975 if (out_dev
== NULL
) {
1976 if (net_ratelimit())
1977 printk(KERN_CRIT
"Bug in ip_route_input" \
1978 "_slow(). Please, report\n");
1983 err
= fib_validate_source(saddr
, daddr
, tos
, FIB_RES_OIF(*res
),
1984 in_dev
->dev
, &spec_dst
, &itag
, skb
->mark
);
1986 ip_handle_martian_source(in_dev
->dev
, in_dev
, skb
, daddr
,
1994 flags
|= RTCF_DIRECTSRC
;
1996 if (out_dev
== in_dev
&& err
&&
1997 (IN_DEV_SHARED_MEDIA(out_dev
) ||
1998 inet_addr_onlink(out_dev
, saddr
, FIB_RES_GW(*res
))))
1999 flags
|= RTCF_DOREDIRECT
;
2001 if (skb
->protocol
!= htons(ETH_P_IP
)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
2009 if (out_dev
== in_dev
&&
2010 IN_DEV_PROXY_ARP_PVLAN(in_dev
) == 0) {
2017 rth
= dst_alloc(&ipv4_dst_ops
);
2023 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
2024 rth
->u
.dst
.flags
= DST_HOST
;
2025 if (IN_DEV_CONF_GET(in_dev
, NOPOLICY
))
2026 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
2027 if (IN_DEV_CONF_GET(out_dev
, NOXFRM
))
2028 rth
->u
.dst
.flags
|= DST_NOXFRM
;
2029 rth
->fl
.fl4_dst
= daddr
;
2030 rth
->rt_dst
= daddr
;
2031 rth
->fl
.fl4_tos
= tos
;
2032 rth
->fl
.mark
= skb
->mark
;
2033 rth
->fl
.fl4_src
= saddr
;
2034 rth
->rt_src
= saddr
;
2035 rth
->rt_gateway
= daddr
;
2037 rth
->fl
.iif
= in_dev
->dev
->ifindex
;
2038 rth
->u
.dst
.dev
= (out_dev
)->dev
;
2039 dev_hold(rth
->u
.dst
.dev
);
2040 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
2042 rth
->rt_spec_dst
= spec_dst
;
2044 rth
->u
.dst
.obsolete
= -1;
2045 rth
->u
.dst
.input
= ip_forward
;
2046 rth
->u
.dst
.output
= ip_output
;
2047 rth
->rt_genid
= rt_genid(dev_net(rth
->u
.dst
.dev
));
2049 rt_set_nexthop(rth
, res
, itag
);
2051 rth
->rt_flags
= flags
;
2056 /* release the working reference to the output device */
2057 in_dev_put(out_dev
);
2061 static int ip_mkroute_input(struct sk_buff
*skb
,
2062 struct fib_result
*res
,
2063 const struct flowi
*fl
,
2064 struct in_device
*in_dev
,
2065 __be32 daddr
, __be32 saddr
, u32 tos
)
2067 struct rtable
* rth
= NULL
;
2071 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2072 if (res
->fi
&& res
->fi
->fib_nhs
> 1 && fl
->oif
== 0)
2073 fib_select_multipath(fl
, res
);
2076 /* create a routing cache entry */
2077 err
= __mkroute_input(skb
, res
, in_dev
, daddr
, saddr
, tos
, &rth
);
2081 /* put it into the cache */
2082 hash
= rt_hash(daddr
, saddr
, fl
->iif
,
2083 rt_genid(dev_net(rth
->u
.dst
.dev
)));
2084 return rt_intern_hash(hash
, rth
, NULL
, skb
, fl
->iif
);
/*
 *	NOTE. We drop all the packets that have a local source
 *	address, because every properly looped back packet
 *	must have a correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 */
2097 static int ip_route_input_slow(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
2098 u8 tos
, struct net_device
*dev
)
2100 struct fib_result res
;
2101 struct in_device
*in_dev
= in_dev_get(dev
);
2102 struct flowi fl
= { .nl_u
= { .ip4_u
=
2106 .scope
= RT_SCOPE_UNIVERSE
,
2109 .iif
= dev
->ifindex
};
2112 struct rtable
* rth
;
2117 struct net
* net
= dev_net(dev
);
2119 /* IP on this device is disabled. */
2124 /* Check for the most weird martians, which can be not detected
2128 if (ipv4_is_multicast(saddr
) || ipv4_is_lbcast(saddr
) ||
2129 ipv4_is_loopback(saddr
))
2130 goto martian_source
;
2132 if (daddr
== htonl(0xFFFFFFFF) || (saddr
== 0 && daddr
== 0))
	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
2138 if (ipv4_is_zeronet(saddr
))
2139 goto martian_source
;
2141 if (ipv4_is_lbcast(daddr
) || ipv4_is_zeronet(daddr
) ||
2142 ipv4_is_loopback(daddr
))
2143 goto martian_destination
;
2146 * Now we are ready to route packet.
2148 if ((err
= fib_lookup(net
, &fl
, &res
)) != 0) {
2149 if (!IN_DEV_FORWARD(in_dev
))
2155 RT_CACHE_STAT_INC(in_slow_tot
);
2157 if (res
.type
== RTN_BROADCAST
)
2160 if (res
.type
== RTN_LOCAL
) {
2162 result
= fib_validate_source(saddr
, daddr
, tos
,
2163 net
->loopback_dev
->ifindex
,
2164 dev
, &spec_dst
, &itag
, skb
->mark
);
2166 goto martian_source
;
2168 flags
|= RTCF_DIRECTSRC
;
2173 if (!IN_DEV_FORWARD(in_dev
))
2175 if (res
.type
!= RTN_UNICAST
)
2176 goto martian_destination
;
2178 err
= ip_mkroute_input(skb
, &res
, &fl
, in_dev
, daddr
, saddr
, tos
);
2186 if (skb
->protocol
!= htons(ETH_P_IP
))
2189 if (ipv4_is_zeronet(saddr
))
2190 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_LINK
);
2192 err
= fib_validate_source(saddr
, 0, tos
, 0, dev
, &spec_dst
,
2195 goto martian_source
;
2197 flags
|= RTCF_DIRECTSRC
;
2199 flags
|= RTCF_BROADCAST
;
2200 res
.type
= RTN_BROADCAST
;
2201 RT_CACHE_STAT_INC(in_brd
);
2204 rth
= dst_alloc(&ipv4_dst_ops
);
2208 rth
->u
.dst
.output
= ip_rt_bug
;
2209 rth
->u
.dst
.obsolete
= -1;
2210 rth
->rt_genid
= rt_genid(net
);
2212 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
2213 rth
->u
.dst
.flags
= DST_HOST
;
2214 if (IN_DEV_CONF_GET(in_dev
, NOPOLICY
))
2215 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
2216 rth
->fl
.fl4_dst
= daddr
;
2217 rth
->rt_dst
= daddr
;
2218 rth
->fl
.fl4_tos
= tos
;
2219 rth
->fl
.mark
= skb
->mark
;
2220 rth
->fl
.fl4_src
= saddr
;
2221 rth
->rt_src
= saddr
;
2222 #ifdef CONFIG_NET_CLS_ROUTE
2223 rth
->u
.dst
.tclassid
= itag
;
2226 rth
->fl
.iif
= dev
->ifindex
;
2227 rth
->u
.dst
.dev
= net
->loopback_dev
;
2228 dev_hold(rth
->u
.dst
.dev
);
2229 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
2230 rth
->rt_gateway
= daddr
;
2231 rth
->rt_spec_dst
= spec_dst
;
2232 rth
->u
.dst
.input
= ip_local_deliver
;
2233 rth
->rt_flags
= flags
|RTCF_LOCAL
;
2234 if (res
.type
== RTN_UNREACHABLE
) {
2235 rth
->u
.dst
.input
= ip_error
;
2236 rth
->u
.dst
.error
= -err
;
2237 rth
->rt_flags
&= ~RTCF_LOCAL
;
2239 rth
->rt_type
= res
.type
;
2240 hash
= rt_hash(daddr
, saddr
, fl
.iif
, rt_genid(net
));
2241 err
= rt_intern_hash(hash
, rth
, NULL
, skb
, fl
.iif
);
2245 RT_CACHE_STAT_INC(in_no_route
);
2246 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_UNIVERSE
);
2247 res
.type
= RTN_UNREACHABLE
;
2253 * Do not cache martian addresses: they should be logged (RFC1812)
2255 martian_destination
:
2256 RT_CACHE_STAT_INC(in_martian_dst
);
2257 #ifdef CONFIG_IP_ROUTE_VERBOSE
2258 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit())
2259 printk(KERN_WARNING
"martian destination %pI4 from %pI4, dev %s\n",
2260 &daddr
, &saddr
, dev
->name
);
2264 err
= -EHOSTUNREACH
;
2276 ip_handle_martian_source(dev
, in_dev
, skb
, daddr
, saddr
);
2280 int ip_route_input_common(struct sk_buff
*skb
, __be32 daddr
, __be32 saddr
,
2281 u8 tos
, struct net_device
*dev
, bool noref
)
2283 struct rtable
* rth
;
2285 int iif
= dev
->ifindex
;
2290 if (!rt_caching(net
))
2293 tos
&= IPTOS_RT_MASK
;
2294 hash
= rt_hash(daddr
, saddr
, iif
, rt_genid(net
));
2297 for (rth
= rcu_dereference(rt_hash_table
[hash
].chain
); rth
;
2298 rth
= rcu_dereference(rth
->u
.dst
.rt_next
)) {
2299 if ((((__force u32
)rth
->fl
.fl4_dst
^ (__force u32
)daddr
) |
2300 ((__force u32
)rth
->fl
.fl4_src
^ (__force u32
)saddr
) |
2301 (rth
->fl
.iif
^ iif
) |
2303 (rth
->fl
.fl4_tos
^ tos
)) == 0 &&
2304 rth
->fl
.mark
== skb
->mark
&&
2305 net_eq(dev_net(rth
->u
.dst
.dev
), net
) &&
2306 !rt_is_expired(rth
)) {
2308 dst_use_noref(&rth
->u
.dst
, jiffies
);
2309 skb_dst_set_noref(skb
, &rth
->u
.dst
);
2311 dst_use(&rth
->u
.dst
, jiffies
);
2312 skb_dst_set(skb
, &rth
->u
.dst
);
2314 RT_CACHE_STAT_INC(in_hit
);
2318 RT_CACHE_STAT_INC(in_hlist_search
);
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result the host on a multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
2334 if (ipv4_is_multicast(daddr
)) {
2335 struct in_device
*in_dev
;
2338 if ((in_dev
= __in_dev_get_rcu(dev
)) != NULL
) {
2339 int our
= ip_check_mc(in_dev
, daddr
, saddr
,
2340 ip_hdr(skb
)->protocol
);
2342 #ifdef CONFIG_IP_MROUTE
2344 (!ipv4_is_local_multicast(daddr
) &&
2345 IN_DEV_MFORWARD(in_dev
))
2349 return ip_route_input_mc(skb
, daddr
, saddr
,
2356 return ip_route_input_slow(skb
, daddr
, saddr
, tos
, dev
);
2358 EXPORT_SYMBOL(ip_route_input_common
);
2360 static int __mkroute_output(struct rtable
**result
,
2361 struct fib_result
*res
,
2362 const struct flowi
*fl
,
2363 const struct flowi
*oldflp
,
2364 struct net_device
*dev_out
,
2368 struct in_device
*in_dev
;
2369 u32 tos
= RT_FL_TOS(oldflp
);
2372 if (ipv4_is_loopback(fl
->fl4_src
) && !(dev_out
->flags
&IFF_LOOPBACK
))
2375 if (fl
->fl4_dst
== htonl(0xFFFFFFFF))
2376 res
->type
= RTN_BROADCAST
;
2377 else if (ipv4_is_multicast(fl
->fl4_dst
))
2378 res
->type
= RTN_MULTICAST
;
2379 else if (ipv4_is_lbcast(fl
->fl4_dst
) || ipv4_is_zeronet(fl
->fl4_dst
))
2382 if (dev_out
->flags
& IFF_LOOPBACK
)
2383 flags
|= RTCF_LOCAL
;
2385 /* get work reference to inet device */
2386 in_dev
= in_dev_get(dev_out
);
2390 if (res
->type
== RTN_BROADCAST
) {
2391 flags
|= RTCF_BROADCAST
| RTCF_LOCAL
;
2393 fib_info_put(res
->fi
);
2396 } else if (res
->type
== RTN_MULTICAST
) {
2397 flags
|= RTCF_MULTICAST
|RTCF_LOCAL
;
2398 if (!ip_check_mc(in_dev
, oldflp
->fl4_dst
, oldflp
->fl4_src
,
2400 flags
&= ~RTCF_LOCAL
;
			/* If the multicast route does not exist, use
			   the default one, but do not gateway in this case.
			 */
2405 if (res
->fi
&& res
->prefixlen
< 4) {
2406 fib_info_put(res
->fi
);
2412 rth
= dst_alloc(&ipv4_dst_ops
);
2418 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
2419 rth
->u
.dst
.flags
= DST_HOST
;
2420 if (IN_DEV_CONF_GET(in_dev
, NOXFRM
))
2421 rth
->u
.dst
.flags
|= DST_NOXFRM
;
2422 if (IN_DEV_CONF_GET(in_dev
, NOPOLICY
))
2423 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
2425 rth
->fl
.fl4_dst
= oldflp
->fl4_dst
;
2426 rth
->fl
.fl4_tos
= tos
;
2427 rth
->fl
.fl4_src
= oldflp
->fl4_src
;
2428 rth
->fl
.oif
= oldflp
->oif
;
2429 rth
->fl
.mark
= oldflp
->mark
;
2430 rth
->rt_dst
= fl
->fl4_dst
;
2431 rth
->rt_src
= fl
->fl4_src
;
2432 rth
->rt_iif
= oldflp
->oif
? : dev_out
->ifindex
;
2433 /* get references to the devices that are to be hold by the routing
2435 rth
->u
.dst
.dev
= dev_out
;
2437 rth
->idev
= in_dev_get(dev_out
);
2438 rth
->rt_gateway
= fl
->fl4_dst
;
2439 rth
->rt_spec_dst
= fl
->fl4_src
;
2441 rth
->u
.dst
.output
=ip_output
;
2442 rth
->u
.dst
.obsolete
= -1;
2443 rth
->rt_genid
= rt_genid(dev_net(dev_out
));
2445 RT_CACHE_STAT_INC(out_slow_tot
);
2447 if (flags
& RTCF_LOCAL
) {
2448 rth
->u
.dst
.input
= ip_local_deliver
;
2449 rth
->rt_spec_dst
= fl
->fl4_dst
;
2451 if (flags
& (RTCF_BROADCAST
| RTCF_MULTICAST
)) {
2452 rth
->rt_spec_dst
= fl
->fl4_src
;
2453 if (flags
& RTCF_LOCAL
&&
2454 !(dev_out
->flags
& IFF_LOOPBACK
)) {
2455 rth
->u
.dst
.output
= ip_mc_output
;
2456 RT_CACHE_STAT_INC(out_slow_mc
);
2458 #ifdef CONFIG_IP_MROUTE
2459 if (res
->type
== RTN_MULTICAST
) {
2460 if (IN_DEV_MFORWARD(in_dev
) &&
2461 !ipv4_is_local_multicast(oldflp
->fl4_dst
)) {
2462 rth
->u
.dst
.input
= ip_mr_input
;
2463 rth
->u
.dst
.output
= ip_mc_output
;
2469 rt_set_nexthop(rth
, res
, 0);
2471 rth
->rt_flags
= flags
;
2475 /* release work reference to inet device */
static int ip_mkroute_output(struct rtable **rp,
			     struct fib_result *res,
			     const struct flowi *fl,
			     const struct flowi *oldflp,
			     struct net_device *dev_out,
			     unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;

	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
			       rt_genid(dev_net(dev_out)));
		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
	}

	return err;
}
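
/*
 * The new entry is interned under the caller-supplied key (the oldflp
 * destination, source and oif) plus the per-namespace generation id, so a
 * later __ip_route_output_key() with the same flow finds it in the hash
 * before falling back to the slow path.
 */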
/*
 * Major route resolver routine.
 */

static int ip_route_output_slow(struct net *net, struct rtable **rp,
				const struct flowi *oldflp)
{
	u32 tos	= RT_FL_TOS(oldflp);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = oldflp->fl4_dst,
					.saddr = oldflp->fl4_src,
					.tos = tos & IPTOS_RT_MASK,
					.scope = ((tos & RTO_ONLINK) ?
						  RT_SCOPE_LINK :
						  RT_SCOPE_UNIVERSE),
				      } },
			    .mark = oldflp->mark,
			    .iif = net->loopback_dev->ifindex,
			    .oif = oldflp->oif };
	struct fib_result res;
	unsigned int flags = 0;
	struct net_device *dev_out = NULL;
	int free_res = 0;
	int err;

	res.fi = NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (ipv4_is_multicast(oldflp->fl4_src) ||
		    ipv4_is_lbcast(oldflp->fl4_src) ||
		    ipv4_is_zeronet(oldflp->fl4_src))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (oldflp->oif == 0 &&
		    (ipv4_is_multicast(oldflp->fl4_dst) ||
		     oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;
		}
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		if (fl.fl4_src)
			fl.fl4_dst = fl.fl4_src;
		else
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;

	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:	return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned int hash;
	struct rtable *rth;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, rp, flp);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.destroy		=	ipv4_dst_destroy,
	.check			=	ipv4_dst_check,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.entries		=	ATOMIC_INIT(0),
};
static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->u.dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));

		new->dev = ort->u.dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->idev = ort->idev;
		if (rt->idev)
			in_dev_hold(rt->idev);
		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);

		dst_free(new);
	}

	dst_release(&(*rp)->u.dst);
	*rp = rt;
	return (rt ? 0 : -ENOMEM);
}
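
/*
 * The blackhole dst built above simply discards traffic; ip_route_output_flow()
 * below falls back to it when __xfrm_lookup() returns -EREMOTE, i.e. when the
 * required IPsec state is not yet resolved, so the caller gets back a valid
 * but non-forwarding route instead of a hard error.
 */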
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(net, rp, flp);

		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
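
/*
 * Usage sketch (illustrative): a typical in-kernel caller fills a flowi key
 * and drops its reference with ip_rt_put() when done, roughly:
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
 *						 .saddr = saddr,
 *						 .tos = RT_TOS(tos) } },
 *			    .oif = oif };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(net, &rt, &fl) == 0) {
 *		... use rt->u.dst ...
 *		ip_rt_put(rt);
 *	}
 */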
static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->fl.fl4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(net, &rt, &fl);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->u.dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
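
/*
 * This handler services RTM_GETROUTE requests from user space, e.g.
 * "ip route get 10.0.0.1" (iproute2), which sends a netlink message with
 * RTA_DST (and optionally RTA_SRC/RTA_IIF/RTA_OIF) and gets back the
 * resolved route built by rt_fill_info() above.
 */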
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb_dst_set_noref(skb, &rt->u.dst);
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
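
/*
 * Administrators trigger this handler by writing a flush delay (in seconds)
 * to the per-namespace sysctl file, e.g.
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * which flushes the routing cache via rt_cache_flush() above.
 */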
static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route",
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh",
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ },
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", },
	{ .procname = "ipv4", },
	{ .procname = "route", },
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
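
/*
 * "rhash_entries=N" can be passed on the kernel command line to override
 * the number of route cache hash buckets otherwise sized by
 * alloc_large_system_hash() in ip_rt_init() below.
 */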
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(totalram_pages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	/* All the timers, started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
	expires_ljiffies = jiffies;
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif

EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_output_key);