2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
8 * Version: $Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
14 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
17 * Alan Cox : Verify area fixes.
18 * Alan Cox : cli() protects routing changes
19 * Rui Oliveira : ICMP routing table updates
20 * (rco@di.uminho.pt) Routing table insertion and update
21 * Linus Torvalds : Rewrote bits to be sensible
22 * Alan Cox : Added BSD route gw semantics
23 * Alan Cox : Super /proc >4K
24 * Alan Cox : MTU in route table
25 * Alan Cox : MSS actually. Also added the window
27 * Sam Lantinga : Fixed route matching in rt_del()
28 * Alan Cox : Routing cache support.
29 * Alan Cox : Removed compatibility cruft.
30 * Alan Cox : RTF_REJECT support.
31 * Alan Cox : TCP irtt support.
32 * Jonathan Naylor : Added Metric support.
33 * Miquel van Smoorenburg : BSD API fixes.
34 * Miquel van Smoorenburg : Metrics.
35 * Alan Cox : Use __u32 properly
36 * Alan Cox : Aligned routing errors more closely with BSD
37 * our system is still very different.
38 * Alan Cox : Faster /proc handling
39 * Alexey Kuznetsov : Massive rework to support tree based routing,
40 * routing caches and better behaviour.
42 * Olaf Erb : irtt wasn't being copied right.
43 * Bjorn Ekwall : Kerneld route support.
44 * Alan Cox : Multicast fixed (I hope)
45 * Pavel Krauz : Limited broadcast fixed
46 * Mike McLagan : Routing by source
47 * Alexey Kuznetsov : End of old history. Split to fib.c and
48 * route.c and rewritten from scratch.
49 * Andi Kleen : Load-limit warning messages.
50 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
51 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
52 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
53 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
54 * Marc Boucher : routing by fwmark
55 * Robert Olsson : Added rt_cache statistics
56 * Arnaldo C. Melo : Convert proc stuff to seq_file
58 * This program is free software; you can redistribute it and/or
59 * modify it under the terms of the GNU General Public License
60 * as published by the Free Software Foundation; either version
61 * 2 of the License, or (at your option) any later version.
64 #include <linux/config.h>
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
71 #include <linux/sched.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
93 #include <net/protocol.h>
95 #include <net/route.h>
96 #include <net/inetpeer.h>
98 #include <net/ip_fib.h>
101 #include <net/icmp.h>
102 #include <net/xfrm.h>
103 #include <net/ip_mp_alg.h>
105 #include <linux/sysctl.h>
108 #define RT_FL_TOS(oldflp) \
109 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
111 #define IP_MAX_MTU 0xFFF0
113 #define RT_GC_TIMEOUT (300*HZ)
115 static int ip_rt_min_delay
= 2 * HZ
;
116 static int ip_rt_max_delay
= 10 * HZ
;
117 static int ip_rt_max_size
;
118 static int ip_rt_gc_timeout
= RT_GC_TIMEOUT
;
119 static int ip_rt_gc_interval
= 60 * HZ
;
120 static int ip_rt_gc_min_interval
= HZ
/ 2;
121 static int ip_rt_redirect_number
= 9;
122 static int ip_rt_redirect_load
= HZ
/ 50;
123 static int ip_rt_redirect_silence
= ((HZ
/ 50) << (9 + 1));
124 static int ip_rt_error_cost
= HZ
;
125 static int ip_rt_error_burst
= 5 * HZ
;
126 static int ip_rt_gc_elasticity
= 8;
127 static int ip_rt_mtu_expires
= 10 * 60 * HZ
;
128 static int ip_rt_min_pmtu
= 512 + 20 + 20;
129 static int ip_rt_min_advmss
= 256;
130 static int ip_rt_secret_interval
= 10 * 60 * HZ
;
131 static unsigned long rt_deadline
;
133 #define RTprint(a...) printk(KERN_DEBUG a)
135 static struct timer_list rt_flush_timer
;
136 static struct timer_list rt_periodic_timer
;
137 static struct timer_list rt_secret_timer
;
140 * Interface to generic destination cache.
143 static struct dst_entry
*ipv4_dst_check(struct dst_entry
*dst
, u32 cookie
);
144 static void ipv4_dst_destroy(struct dst_entry
*dst
);
145 static void ipv4_dst_ifdown(struct dst_entry
*dst
,
146 struct net_device
*dev
, int how
);
147 static struct dst_entry
*ipv4_negative_advice(struct dst_entry
*dst
);
148 static void ipv4_link_failure(struct sk_buff
*skb
);
149 static void ip_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
);
150 static int rt_garbage_collect(void);
153 static struct dst_ops ipv4_dst_ops
= {
155 .protocol
= __constant_htons(ETH_P_IP
),
156 .gc
= rt_garbage_collect
,
157 .check
= ipv4_dst_check
,
158 .destroy
= ipv4_dst_destroy
,
159 .ifdown
= ipv4_dst_ifdown
,
160 .negative_advice
= ipv4_negative_advice
,
161 .link_failure
= ipv4_link_failure
,
162 .update_pmtu
= ip_rt_update_pmtu
,
163 .entry_size
= sizeof(struct rtable
),
166 #define ECN_OR_COST(class) TC_PRIO_##class
168 __u8 ip_tos2prio
[16] = {
172 ECN_OR_COST(BESTEFFORT
),
178 ECN_OR_COST(INTERACTIVE
),
180 ECN_OR_COST(INTERACTIVE
),
181 TC_PRIO_INTERACTIVE_BULK
,
182 ECN_OR_COST(INTERACTIVE_BULK
),
183 TC_PRIO_INTERACTIVE_BULK
,
184 ECN_OR_COST(INTERACTIVE_BULK
)
192 /* The locking scheme is rather straight forward:
194 * 1) Read-Copy Update protects the buckets of the central route hash.
195 * 2) Only writers remove entries, and they hold the lock
196 * as they look at rtable reference counts.
197 * 3) Only readers acquire references to rtable entries,
198 * they do so with atomic increments and with the
202 struct rt_hash_bucket
{
203 struct rtable
*chain
;
205 } __attribute__((__aligned__(8)));
207 static struct rt_hash_bucket
*rt_hash_table
;
208 static unsigned rt_hash_mask
;
209 static int rt_hash_log
;
210 static unsigned int rt_hash_rnd
;
212 struct rt_cache_stat
*rt_cache_stat
;
214 static int rt_intern_hash(unsigned hash
, struct rtable
*rth
,
215 struct rtable
**res
);
217 static unsigned int rt_hash_code(u32 daddr
, u32 saddr
, u8 tos
)
219 return (jhash_3words(daddr
, saddr
, (u32
) tos
, rt_hash_rnd
)
223 #ifdef CONFIG_PROC_FS
224 struct rt_cache_iter_state
{
228 static struct rtable
*rt_cache_get_first(struct seq_file
*seq
)
230 struct rtable
*r
= NULL
;
231 struct rt_cache_iter_state
*st
= seq
->private;
233 for (st
->bucket
= rt_hash_mask
; st
->bucket
>= 0; --st
->bucket
) {
235 r
= rt_hash_table
[st
->bucket
].chain
;
238 rcu_read_unlock_bh();
243 static struct rtable
*rt_cache_get_next(struct seq_file
*seq
, struct rtable
*r
)
245 struct rt_cache_iter_state
*st
= rcu_dereference(seq
->private);
249 rcu_read_unlock_bh();
250 if (--st
->bucket
< 0)
253 r
= rt_hash_table
[st
->bucket
].chain
;
258 static struct rtable
*rt_cache_get_idx(struct seq_file
*seq
, loff_t pos
)
260 struct rtable
*r
= rt_cache_get_first(seq
);
263 while (pos
&& (r
= rt_cache_get_next(seq
, r
)))
265 return pos
? NULL
: r
;
268 static void *rt_cache_seq_start(struct seq_file
*seq
, loff_t
*pos
)
270 return *pos
? rt_cache_get_idx(seq
, *pos
- 1) : SEQ_START_TOKEN
;
273 static void *rt_cache_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
275 struct rtable
*r
= NULL
;
277 if (v
== SEQ_START_TOKEN
)
278 r
= rt_cache_get_first(seq
);
280 r
= rt_cache_get_next(seq
, v
);
285 static void rt_cache_seq_stop(struct seq_file
*seq
, void *v
)
287 if (v
&& v
!= SEQ_START_TOKEN
)
288 rcu_read_unlock_bh();
291 static int rt_cache_seq_show(struct seq_file
*seq
, void *v
)
293 if (v
== SEQ_START_TOKEN
)
294 seq_printf(seq
, "%-127s\n",
295 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
296 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
299 struct rtable
*r
= v
;
302 sprintf(temp
, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
303 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
304 r
->u
.dst
.dev
? r
->u
.dst
.dev
->name
: "*",
305 (unsigned long)r
->rt_dst
, (unsigned long)r
->rt_gateway
,
306 r
->rt_flags
, atomic_read(&r
->u
.dst
.__refcnt
),
307 r
->u
.dst
.__use
, 0, (unsigned long)r
->rt_src
,
308 (dst_metric(&r
->u
.dst
, RTAX_ADVMSS
) ?
309 (int)dst_metric(&r
->u
.dst
, RTAX_ADVMSS
) + 40 : 0),
310 dst_metric(&r
->u
.dst
, RTAX_WINDOW
),
311 (int)((dst_metric(&r
->u
.dst
, RTAX_RTT
) >> 3) +
312 dst_metric(&r
->u
.dst
, RTAX_RTTVAR
)),
314 r
->u
.dst
.hh
? atomic_read(&r
->u
.dst
.hh
->hh_refcnt
) : -1,
315 r
->u
.dst
.hh
? (r
->u
.dst
.hh
->hh_output
==
318 seq_printf(seq
, "%-127s\n", temp
);
323 static struct seq_operations rt_cache_seq_ops
= {
324 .start
= rt_cache_seq_start
,
325 .next
= rt_cache_seq_next
,
326 .stop
= rt_cache_seq_stop
,
327 .show
= rt_cache_seq_show
,
330 static int rt_cache_seq_open(struct inode
*inode
, struct file
*file
)
332 struct seq_file
*seq
;
334 struct rt_cache_iter_state
*s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
338 rc
= seq_open(file
, &rt_cache_seq_ops
);
341 seq
= file
->private_data
;
343 memset(s
, 0, sizeof(*s
));
351 static struct file_operations rt_cache_seq_fops
= {
352 .owner
= THIS_MODULE
,
353 .open
= rt_cache_seq_open
,
356 .release
= seq_release_private
,
360 static void *rt_cpu_seq_start(struct seq_file
*seq
, loff_t
*pos
)
365 return SEQ_START_TOKEN
;
367 for (cpu
= *pos
-1; cpu
< NR_CPUS
; ++cpu
) {
368 if (!cpu_possible(cpu
))
371 return per_cpu_ptr(rt_cache_stat
, cpu
);
376 static void *rt_cpu_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
380 for (cpu
= *pos
; cpu
< NR_CPUS
; ++cpu
) {
381 if (!cpu_possible(cpu
))
384 return per_cpu_ptr(rt_cache_stat
, cpu
);
390 static void rt_cpu_seq_stop(struct seq_file
*seq
, void *v
)
395 static int rt_cpu_seq_show(struct seq_file
*seq
, void *v
)
397 struct rt_cache_stat
*st
= v
;
399 if (v
== SEQ_START_TOKEN
) {
400 seq_printf(seq
, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
404 seq_printf(seq
,"%08x %08x %08x %08x %08x %08x %08x %08x "
405 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
406 atomic_read(&ipv4_dst_ops
.entries
),
429 static struct seq_operations rt_cpu_seq_ops
= {
430 .start
= rt_cpu_seq_start
,
431 .next
= rt_cpu_seq_next
,
432 .stop
= rt_cpu_seq_stop
,
433 .show
= rt_cpu_seq_show
,
437 static int rt_cpu_seq_open(struct inode
*inode
, struct file
*file
)
439 return seq_open(file
, &rt_cpu_seq_ops
);
442 static struct file_operations rt_cpu_seq_fops
= {
443 .owner
= THIS_MODULE
,
444 .open
= rt_cpu_seq_open
,
447 .release
= seq_release
,
450 #endif /* CONFIG_PROC_FS */
452 static __inline__
void rt_free(struct rtable
*rt
)
454 multipath_remove(rt
);
455 call_rcu_bh(&rt
->u
.dst
.rcu_head
, dst_rcu_free
);
458 static __inline__
void rt_drop(struct rtable
*rt
)
460 multipath_remove(rt
);
462 call_rcu_bh(&rt
->u
.dst
.rcu_head
, dst_rcu_free
);
465 static __inline__
int rt_fast_clean(struct rtable
*rth
)
467 /* Kill broadcast/multicast entries very aggresively, if they
468 collide in hash table with more useful entries */
469 return (rth
->rt_flags
& (RTCF_BROADCAST
| RTCF_MULTICAST
)) &&
470 rth
->fl
.iif
&& rth
->u
.rt_next
;
473 static __inline__
int rt_valuable(struct rtable
*rth
)
475 return (rth
->rt_flags
& (RTCF_REDIRECTED
| RTCF_NOTIFY
)) ||
479 static int rt_may_expire(struct rtable
*rth
, unsigned long tmo1
, unsigned long tmo2
)
484 if (atomic_read(&rth
->u
.dst
.__refcnt
))
488 if (rth
->u
.dst
.expires
&&
489 time_after_eq(jiffies
, rth
->u
.dst
.expires
))
492 age
= jiffies
- rth
->u
.dst
.lastuse
;
494 if ((age
<= tmo1
&& !rt_fast_clean(rth
)) ||
495 (age
<= tmo2
&& rt_valuable(rth
)))
501 /* Bits of score are:
503 * 30: not quite useless
504 * 29..0: usage counter
506 static inline u32
rt_score(struct rtable
*rt
)
508 u32 score
= jiffies
- rt
->u
.dst
.lastuse
;
510 score
= ~score
& ~(3<<30);
516 !(rt
->rt_flags
& (RTCF_BROADCAST
|RTCF_MULTICAST
|RTCF_LOCAL
)))
522 static inline int compare_keys(struct flowi
*fl1
, struct flowi
*fl2
)
524 return memcmp(&fl1
->nl_u
.ip4_u
, &fl2
->nl_u
.ip4_u
, sizeof(fl1
->nl_u
.ip4_u
)) == 0 &&
525 fl1
->oif
== fl2
->oif
&&
526 fl1
->iif
== fl2
->iif
;
529 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
530 static struct rtable
**rt_remove_balanced_route(struct rtable
**chain_head
,
531 struct rtable
*expentry
,
534 int passedexpired
= 0;
535 struct rtable
**nextstep
= NULL
;
536 struct rtable
**rthp
= chain_head
;
542 while ((rth
= *rthp
) != NULL
) {
546 if (((*rthp
)->u
.dst
.flags
& DST_BALANCED
) != 0 &&
547 compare_keys(&(*rthp
)->fl
, &expentry
->fl
)) {
548 if (*rthp
== expentry
) {
549 *rthp
= rth
->u
.rt_next
;
552 *rthp
= rth
->u
.rt_next
;
558 if (!((*rthp
)->u
.dst
.flags
& DST_BALANCED
) &&
559 passedexpired
&& !nextstep
)
560 nextstep
= &rth
->u
.rt_next
;
562 rthp
= &rth
->u
.rt_next
;
572 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
575 /* This runs via a timer and thus is always in BH context. */
576 static void rt_check_expire(unsigned long dummy
)
580 struct rtable
*rth
, **rthp
;
581 unsigned long now
= jiffies
;
583 for (t
= ip_rt_gc_interval
<< rt_hash_log
; t
>= 0;
584 t
-= ip_rt_gc_timeout
) {
585 unsigned long tmo
= ip_rt_gc_timeout
;
587 i
= (i
+ 1) & rt_hash_mask
;
588 rthp
= &rt_hash_table
[i
].chain
;
590 spin_lock(&rt_hash_table
[i
].lock
);
591 while ((rth
= *rthp
) != NULL
) {
592 if (rth
->u
.dst
.expires
) {
593 /* Entry is expired even if it is in use */
594 if (time_before_eq(now
, rth
->u
.dst
.expires
)) {
596 rthp
= &rth
->u
.rt_next
;
599 } else if (!rt_may_expire(rth
, tmo
, ip_rt_gc_timeout
)) {
601 rthp
= &rth
->u
.rt_next
;
605 /* Cleanup aged off entries. */
606 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
607 /* remove all related balanced entries if necessary */
608 if (rth
->u
.dst
.flags
& DST_BALANCED
) {
609 rthp
= rt_remove_balanced_route(
610 &rt_hash_table
[i
].chain
,
615 *rthp
= rth
->u
.rt_next
;
618 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
619 *rthp
= rth
->u
.rt_next
;
621 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
623 spin_unlock(&rt_hash_table
[i
].lock
);
625 /* Fallback loop breaker. */
626 if (time_after(jiffies
, now
))
630 mod_timer(&rt_periodic_timer
, now
+ ip_rt_gc_interval
);
633 /* This can run from both BH and non-BH contexts, the latter
634 * in the case of a forced flush event.
636 static void rt_run_flush(unsigned long dummy
)
639 struct rtable
*rth
, *next
;
643 get_random_bytes(&rt_hash_rnd
, 4);
645 for (i
= rt_hash_mask
; i
>= 0; i
--) {
646 spin_lock_bh(&rt_hash_table
[i
].lock
);
647 rth
= rt_hash_table
[i
].chain
;
649 rt_hash_table
[i
].chain
= NULL
;
650 spin_unlock_bh(&rt_hash_table
[i
].lock
);
652 for (; rth
; rth
= next
) {
653 next
= rth
->u
.rt_next
;
659 static DEFINE_SPINLOCK(rt_flush_lock
);
661 void rt_cache_flush(int delay
)
663 unsigned long now
= jiffies
;
664 int user_mode
= !in_softirq();
667 delay
= ip_rt_min_delay
;
669 /* flush existing multipath state*/
672 spin_lock_bh(&rt_flush_lock
);
674 if (del_timer(&rt_flush_timer
) && delay
> 0 && rt_deadline
) {
675 long tmo
= (long)(rt_deadline
- now
);
677 /* If flush timer is already running
678 and flush request is not immediate (delay > 0):
680 if deadline is not achieved, prolongate timer to "delay",
681 otherwise fire it at deadline time.
684 if (user_mode
&& tmo
< ip_rt_max_delay
-ip_rt_min_delay
)
692 spin_unlock_bh(&rt_flush_lock
);
697 if (rt_deadline
== 0)
698 rt_deadline
= now
+ ip_rt_max_delay
;
700 mod_timer(&rt_flush_timer
, now
+delay
);
701 spin_unlock_bh(&rt_flush_lock
);
704 static void rt_secret_rebuild(unsigned long dummy
)
706 unsigned long now
= jiffies
;
709 mod_timer(&rt_secret_timer
, now
+ ip_rt_secret_interval
);
713 Short description of GC goals.
715 We want to build algorithm, which will keep routing cache
716 at some equilibrium point, when number of aged off entries
717 is kept approximately equal to newly generated ones.
719 Current expiration strength is variable "expire".
720 We try to adjust it dynamically, so that if networking
721 is idle expires is large enough to keep enough of warm entries,
722 and when load increases it reduces to limit cache size.
725 static int rt_garbage_collect(void)
727 static unsigned long expire
= RT_GC_TIMEOUT
;
728 static unsigned long last_gc
;
730 static int equilibrium
;
731 struct rtable
*rth
, **rthp
;
732 unsigned long now
= jiffies
;
736 * Garbage collection is pretty expensive,
737 * do not make it too frequently.
740 RT_CACHE_STAT_INC(gc_total
);
742 if (now
- last_gc
< ip_rt_gc_min_interval
&&
743 atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
) {
744 RT_CACHE_STAT_INC(gc_ignored
);
748 /* Calculate number of entries, which we want to expire now. */
749 goal
= atomic_read(&ipv4_dst_ops
.entries
) -
750 (ip_rt_gc_elasticity
<< rt_hash_log
);
752 if (equilibrium
< ipv4_dst_ops
.gc_thresh
)
753 equilibrium
= ipv4_dst_ops
.gc_thresh
;
754 goal
= atomic_read(&ipv4_dst_ops
.entries
) - equilibrium
;
756 equilibrium
+= min_t(unsigned int, goal
/ 2, rt_hash_mask
+ 1);
757 goal
= atomic_read(&ipv4_dst_ops
.entries
) - equilibrium
;
760 /* We are in dangerous area. Try to reduce cache really
763 goal
= max_t(unsigned int, goal
/ 2, rt_hash_mask
+ 1);
764 equilibrium
= atomic_read(&ipv4_dst_ops
.entries
) - goal
;
767 if (now
- last_gc
>= ip_rt_gc_min_interval
)
778 for (i
= rt_hash_mask
, k
= rover
; i
>= 0; i
--) {
779 unsigned long tmo
= expire
;
781 k
= (k
+ 1) & rt_hash_mask
;
782 rthp
= &rt_hash_table
[k
].chain
;
783 spin_lock_bh(&rt_hash_table
[k
].lock
);
784 while ((rth
= *rthp
) != NULL
) {
785 if (!rt_may_expire(rth
, tmo
, expire
)) {
787 rthp
= &rth
->u
.rt_next
;
790 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
791 /* remove all related balanced entries
794 if (rth
->u
.dst
.flags
& DST_BALANCED
) {
797 rthp
= rt_remove_balanced_route(
798 &rt_hash_table
[i
].chain
,
805 *rthp
= rth
->u
.rt_next
;
809 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
810 *rthp
= rth
->u
.rt_next
;
813 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
815 spin_unlock_bh(&rt_hash_table
[k
].lock
);
824 /* Goal is not achieved. We stop process if:
826 - if expire reduced to zero. Otherwise, expire is halfed.
827 - if table is not full.
828 - if we are called from interrupt.
829 - jiffies check is just fallback/debug loop breaker.
830 We will not spin here for long time in any case.
833 RT_CACHE_STAT_INC(gc_goal_miss
);
839 #if RT_CACHE_DEBUG >= 2
840 printk(KERN_DEBUG
"expire>> %u %d %d %d\n", expire
,
841 atomic_read(&ipv4_dst_ops
.entries
), goal
, i
);
844 if (atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
)
846 } while (!in_softirq() && time_before_eq(jiffies
, now
));
848 if (atomic_read(&ipv4_dst_ops
.entries
) < ip_rt_max_size
)
851 printk(KERN_WARNING
"dst cache overflow\n");
852 RT_CACHE_STAT_INC(gc_dst_overflow
);
856 expire
+= ip_rt_gc_min_interval
;
857 if (expire
> ip_rt_gc_timeout
||
858 atomic_read(&ipv4_dst_ops
.entries
) < ipv4_dst_ops
.gc_thresh
)
859 expire
= ip_rt_gc_timeout
;
860 #if RT_CACHE_DEBUG >= 2
861 printk(KERN_DEBUG
"expire++ %u %d %d %d\n", expire
,
862 atomic_read(&ipv4_dst_ops
.entries
), goal
, rover
);
867 static int rt_intern_hash(unsigned hash
, struct rtable
*rt
, struct rtable
**rp
)
869 struct rtable
*rth
, **rthp
;
871 struct rtable
*cand
, **candp
;
874 int attempts
= !in_softirq();
883 rthp
= &rt_hash_table
[hash
].chain
;
885 spin_lock_bh(&rt_hash_table
[hash
].lock
);
886 while ((rth
= *rthp
) != NULL
) {
887 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
888 if (!(rth
->u
.dst
.flags
& DST_BALANCED
) &&
889 compare_keys(&rth
->fl
, &rt
->fl
)) {
891 if (compare_keys(&rth
->fl
, &rt
->fl
)) {
894 *rthp
= rth
->u
.rt_next
;
896 * Since lookup is lockfree, the deletion
897 * must be visible to another weakly ordered CPU before
898 * the insertion at the start of the hash chain.
900 rcu_assign_pointer(rth
->u
.rt_next
,
901 rt_hash_table
[hash
].chain
);
903 * Since lookup is lockfree, the update writes
904 * must be ordered for consistency on SMP.
906 rcu_assign_pointer(rt_hash_table
[hash
].chain
, rth
);
909 dst_hold(&rth
->u
.dst
);
910 rth
->u
.dst
.lastuse
= now
;
911 spin_unlock_bh(&rt_hash_table
[hash
].lock
);
918 if (!atomic_read(&rth
->u
.dst
.__refcnt
)) {
919 u32 score
= rt_score(rth
);
921 if (score
<= min_score
) {
930 rthp
= &rth
->u
.rt_next
;
934 /* ip_rt_gc_elasticity used to be average length of chain
935 * length, when exceeded gc becomes really aggressive.
937 * The second limit is less certain. At the moment it allows
938 * only 2 entries per bucket. We will see.
940 if (chain_length
> ip_rt_gc_elasticity
) {
941 *candp
= cand
->u
.rt_next
;
946 /* Try to bind route to arp only if it is output
947 route or unicast forwarding path.
949 if (rt
->rt_type
== RTN_UNICAST
|| rt
->fl
.iif
== 0) {
950 int err
= arp_bind_neighbour(&rt
->u
.dst
);
952 spin_unlock_bh(&rt_hash_table
[hash
].lock
);
954 if (err
!= -ENOBUFS
) {
959 /* Neighbour tables are full and nothing
960 can be released. Try to shrink route cache,
961 it is most likely it holds some neighbour records.
963 if (attempts
-- > 0) {
964 int saved_elasticity
= ip_rt_gc_elasticity
;
965 int saved_int
= ip_rt_gc_min_interval
;
966 ip_rt_gc_elasticity
= 1;
967 ip_rt_gc_min_interval
= 0;
968 rt_garbage_collect();
969 ip_rt_gc_min_interval
= saved_int
;
970 ip_rt_gc_elasticity
= saved_elasticity
;
975 printk(KERN_WARNING
"Neighbour table overflow.\n");
981 rt
->u
.rt_next
= rt_hash_table
[hash
].chain
;
982 #if RT_CACHE_DEBUG >= 2
985 printk(KERN_DEBUG
"rt_cache @%02x: %u.%u.%u.%u", hash
,
986 NIPQUAD(rt
->rt_dst
));
987 for (trt
= rt
->u
.rt_next
; trt
; trt
= trt
->u
.rt_next
)
988 printk(" . %u.%u.%u.%u", NIPQUAD(trt
->rt_dst
));
992 rt_hash_table
[hash
].chain
= rt
;
993 spin_unlock_bh(&rt_hash_table
[hash
].lock
);
998 void rt_bind_peer(struct rtable
*rt
, int create
)
1000 static DEFINE_SPINLOCK(rt_peer_lock
);
1001 struct inet_peer
*peer
;
1003 peer
= inet_getpeer(rt
->rt_dst
, create
);
1005 spin_lock_bh(&rt_peer_lock
);
1006 if (rt
->peer
== NULL
) {
1010 spin_unlock_bh(&rt_peer_lock
);
1016 * Peer allocation may fail only in serious out-of-memory conditions. However
1017 * we still can generate some output.
1018 * Random ID selection looks a bit dangerous because we have no chances to
1019 * select ID being unique in a reasonable period of time.
1020 * But broken packet identifier may be better than no packet at all.
1022 static void ip_select_fb_ident(struct iphdr
*iph
)
1024 static DEFINE_SPINLOCK(ip_fb_id_lock
);
1025 static u32 ip_fallback_id
;
1028 spin_lock_bh(&ip_fb_id_lock
);
1029 salt
= secure_ip_id(ip_fallback_id
^ iph
->daddr
);
1030 iph
->id
= htons(salt
& 0xFFFF);
1031 ip_fallback_id
= salt
;
1032 spin_unlock_bh(&ip_fb_id_lock
);
1035 void __ip_select_ident(struct iphdr
*iph
, struct dst_entry
*dst
, int more
)
1037 struct rtable
*rt
= (struct rtable
*) dst
;
1040 if (rt
->peer
== NULL
)
1041 rt_bind_peer(rt
, 1);
1043 /* If peer is attached to destination, it is never detached,
1044 so that we need not to grab a lock to dereference it.
1047 iph
->id
= htons(inet_getid(rt
->peer
, more
));
1051 printk(KERN_DEBUG
"rt_bind_peer(0) @%p\n",
1052 __builtin_return_address(0));
1054 ip_select_fb_ident(iph
);
1057 static void rt_del(unsigned hash
, struct rtable
*rt
)
1059 struct rtable
**rthp
;
1061 spin_lock_bh(&rt_hash_table
[hash
].lock
);
1063 for (rthp
= &rt_hash_table
[hash
].chain
; *rthp
;
1064 rthp
= &(*rthp
)->u
.rt_next
)
1066 *rthp
= rt
->u
.rt_next
;
1070 spin_unlock_bh(&rt_hash_table
[hash
].lock
);
1073 void ip_rt_redirect(u32 old_gw
, u32 daddr
, u32 new_gw
,
1074 u32 saddr
, u8 tos
, struct net_device
*dev
)
1077 struct in_device
*in_dev
= in_dev_get(dev
);
1078 struct rtable
*rth
, **rthp
;
1079 u32 skeys
[2] = { saddr
, 0 };
1080 int ikeys
[2] = { dev
->ifindex
, 0 };
1082 tos
&= IPTOS_RT_MASK
;
1087 if (new_gw
== old_gw
|| !IN_DEV_RX_REDIRECTS(in_dev
)
1088 || MULTICAST(new_gw
) || BADCLASS(new_gw
) || ZERONET(new_gw
))
1089 goto reject_redirect
;
1091 if (!IN_DEV_SHARED_MEDIA(in_dev
)) {
1092 if (!inet_addr_onlink(in_dev
, new_gw
, old_gw
))
1093 goto reject_redirect
;
1094 if (IN_DEV_SEC_REDIRECTS(in_dev
) && ip_fib_check_default(new_gw
, dev
))
1095 goto reject_redirect
;
1097 if (inet_addr_type(new_gw
) != RTN_UNICAST
)
1098 goto reject_redirect
;
1101 for (i
= 0; i
< 2; i
++) {
1102 for (k
= 0; k
< 2; k
++) {
1103 unsigned hash
= rt_hash_code(daddr
,
1104 skeys
[i
] ^ (ikeys
[k
] << 5),
1107 rthp
=&rt_hash_table
[hash
].chain
;
1110 while ((rth
= rcu_dereference(*rthp
)) != NULL
) {
1113 if (rth
->fl
.fl4_dst
!= daddr
||
1114 rth
->fl
.fl4_src
!= skeys
[i
] ||
1115 rth
->fl
.fl4_tos
!= tos
||
1116 rth
->fl
.oif
!= ikeys
[k
] ||
1118 rthp
= &rth
->u
.rt_next
;
1122 if (rth
->rt_dst
!= daddr
||
1123 rth
->rt_src
!= saddr
||
1125 rth
->rt_gateway
!= old_gw
||
1126 rth
->u
.dst
.dev
!= dev
)
1129 dst_hold(&rth
->u
.dst
);
1132 rt
= dst_alloc(&ipv4_dst_ops
);
1139 /* Copy all the information. */
1141 INIT_RCU_HEAD(&rt
->u
.dst
.rcu_head
);
1142 rt
->u
.dst
.__use
= 1;
1143 atomic_set(&rt
->u
.dst
.__refcnt
, 1);
1144 rt
->u
.dst
.child
= NULL
;
1146 dev_hold(rt
->u
.dst
.dev
);
1148 in_dev_hold(rt
->idev
);
1149 rt
->u
.dst
.obsolete
= 0;
1150 rt
->u
.dst
.lastuse
= jiffies
;
1151 rt
->u
.dst
.path
= &rt
->u
.dst
;
1152 rt
->u
.dst
.neighbour
= NULL
;
1153 rt
->u
.dst
.hh
= NULL
;
1154 rt
->u
.dst
.xfrm
= NULL
;
1156 rt
->rt_flags
|= RTCF_REDIRECTED
;
1158 /* Gateway is different ... */
1159 rt
->rt_gateway
= new_gw
;
1161 /* Redirect received -> path was valid */
1162 dst_confirm(&rth
->u
.dst
);
1165 atomic_inc(&rt
->peer
->refcnt
);
1167 if (arp_bind_neighbour(&rt
->u
.dst
) ||
1168 !(rt
->u
.dst
.neighbour
->nud_state
&
1170 if (rt
->u
.dst
.neighbour
)
1171 neigh_event_send(rt
->u
.dst
.neighbour
, NULL
);
1178 if (!rt_intern_hash(hash
, rt
, &rt
))
1191 #ifdef CONFIG_IP_ROUTE_VERBOSE
1192 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit())
1193 printk(KERN_INFO
"Redirect from %u.%u.%u.%u on %s about "
1194 "%u.%u.%u.%u ignored.\n"
1195 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, "
1197 NIPQUAD(old_gw
), dev
->name
, NIPQUAD(new_gw
),
1198 NIPQUAD(saddr
), NIPQUAD(daddr
), tos
);
1203 static struct dst_entry
*ipv4_negative_advice(struct dst_entry
*dst
)
1205 struct rtable
*rt
= (struct rtable
*)dst
;
1206 struct dst_entry
*ret
= dst
;
1209 if (dst
->obsolete
) {
1212 } else if ((rt
->rt_flags
& RTCF_REDIRECTED
) ||
1213 rt
->u
.dst
.expires
) {
1214 unsigned hash
= rt_hash_code(rt
->fl
.fl4_dst
,
1218 #if RT_CACHE_DEBUG >= 1
1219 printk(KERN_DEBUG
"ip_rt_advice: redirect to "
1220 "%u.%u.%u.%u/%02x dropped\n",
1221 NIPQUAD(rt
->rt_dst
), rt
->fl
.fl4_tos
);
1232 * 1. The first ip_rt_redirect_number redirects are sent
1233 * with exponential backoff, then we stop sending them at all,
1234 * assuming that the host ignores our redirects.
1235 * 2. If we did not see packets requiring redirects
1236 * during ip_rt_redirect_silence, we assume that the host
1237 * forgot redirected route and start to send redirects again.
1239 * This algorithm is much cheaper and more intelligent than dumb load limiting
1242 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1243 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1246 void ip_rt_send_redirect(struct sk_buff
*skb
)
1248 struct rtable
*rt
= (struct rtable
*)skb
->dst
;
1249 struct in_device
*in_dev
= in_dev_get(rt
->u
.dst
.dev
);
1254 if (!IN_DEV_TX_REDIRECTS(in_dev
))
1257 /* No redirected packets during ip_rt_redirect_silence;
1258 * reset the algorithm.
1260 if (time_after(jiffies
, rt
->u
.dst
.rate_last
+ ip_rt_redirect_silence
))
1261 rt
->u
.dst
.rate_tokens
= 0;
1263 /* Too many ignored redirects; do not send anything
1264 * set u.dst.rate_last to the last seen redirected packet.
1266 if (rt
->u
.dst
.rate_tokens
>= ip_rt_redirect_number
) {
1267 rt
->u
.dst
.rate_last
= jiffies
;
1271 /* Check for load limit; set rate_last to the latest sent
1274 if (time_after(jiffies
,
1275 (rt
->u
.dst
.rate_last
+
1276 (ip_rt_redirect_load
<< rt
->u
.dst
.rate_tokens
)))) {
1277 icmp_send(skb
, ICMP_REDIRECT
, ICMP_REDIR_HOST
, rt
->rt_gateway
);
1278 rt
->u
.dst
.rate_last
= jiffies
;
1279 ++rt
->u
.dst
.rate_tokens
;
1280 #ifdef CONFIG_IP_ROUTE_VERBOSE
1281 if (IN_DEV_LOG_MARTIANS(in_dev
) &&
1282 rt
->u
.dst
.rate_tokens
== ip_rt_redirect_number
&&
1284 printk(KERN_WARNING
"host %u.%u.%u.%u/if%d ignores "
1285 "redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
1286 NIPQUAD(rt
->rt_src
), rt
->rt_iif
,
1287 NIPQUAD(rt
->rt_dst
), NIPQUAD(rt
->rt_gateway
));
1294 static int ip_error(struct sk_buff
*skb
)
1296 struct rtable
*rt
= (struct rtable
*)skb
->dst
;
1300 switch (rt
->u
.dst
.error
) {
1305 code
= ICMP_HOST_UNREACH
;
1308 code
= ICMP_NET_UNREACH
;
1311 code
= ICMP_PKT_FILTERED
;
1316 rt
->u
.dst
.rate_tokens
+= now
- rt
->u
.dst
.rate_last
;
1317 if (rt
->u
.dst
.rate_tokens
> ip_rt_error_burst
)
1318 rt
->u
.dst
.rate_tokens
= ip_rt_error_burst
;
1319 rt
->u
.dst
.rate_last
= now
;
1320 if (rt
->u
.dst
.rate_tokens
>= ip_rt_error_cost
) {
1321 rt
->u
.dst
.rate_tokens
-= ip_rt_error_cost
;
1322 icmp_send(skb
, ICMP_DEST_UNREACH
, code
, 0);
1325 out
: kfree_skb(skb
);
1330 * The last two values are not from the RFC but
1331 * are needed for AMPRnet AX.25 paths.
1334 static unsigned short mtu_plateau
[] =
1335 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1337 static __inline__
unsigned short guess_mtu(unsigned short old_mtu
)
1341 for (i
= 0; i
< ARRAY_SIZE(mtu_plateau
); i
++)
1342 if (old_mtu
> mtu_plateau
[i
])
1343 return mtu_plateau
[i
];
1347 unsigned short ip_rt_frag_needed(struct iphdr
*iph
, unsigned short new_mtu
)
1350 unsigned short old_mtu
= ntohs(iph
->tot_len
);
1352 u32 skeys
[2] = { iph
->saddr
, 0, };
1353 u32 daddr
= iph
->daddr
;
1354 u8 tos
= iph
->tos
& IPTOS_RT_MASK
;
1355 unsigned short est_mtu
= 0;
1357 if (ipv4_config
.no_pmtu_disc
)
1360 for (i
= 0; i
< 2; i
++) {
1361 unsigned hash
= rt_hash_code(daddr
, skeys
[i
], tos
);
1364 for (rth
= rcu_dereference(rt_hash_table
[hash
].chain
); rth
;
1365 rth
= rcu_dereference(rth
->u
.rt_next
)) {
1366 if (rth
->fl
.fl4_dst
== daddr
&&
1367 rth
->fl
.fl4_src
== skeys
[i
] &&
1368 rth
->rt_dst
== daddr
&&
1369 rth
->rt_src
== iph
->saddr
&&
1370 rth
->fl
.fl4_tos
== tos
&&
1372 !(dst_metric_locked(&rth
->u
.dst
, RTAX_MTU
))) {
1373 unsigned short mtu
= new_mtu
;
1375 if (new_mtu
< 68 || new_mtu
>= old_mtu
) {
1377 /* BSD 4.2 compatibility hack :-( */
1379 old_mtu
>= rth
->u
.dst
.metrics
[RTAX_MTU
-1] &&
1380 old_mtu
>= 68 + (iph
->ihl
<< 2))
1381 old_mtu
-= iph
->ihl
<< 2;
1383 mtu
= guess_mtu(old_mtu
);
1385 if (mtu
<= rth
->u
.dst
.metrics
[RTAX_MTU
-1]) {
1386 if (mtu
< rth
->u
.dst
.metrics
[RTAX_MTU
-1]) {
1387 dst_confirm(&rth
->u
.dst
);
1388 if (mtu
< ip_rt_min_pmtu
) {
1389 mtu
= ip_rt_min_pmtu
;
1390 rth
->u
.dst
.metrics
[RTAX_LOCK
-1] |=
1393 rth
->u
.dst
.metrics
[RTAX_MTU
-1] = mtu
;
1394 dst_set_expires(&rth
->u
.dst
,
1403 return est_mtu
? : new_mtu
;
1406 static void ip_rt_update_pmtu(struct dst_entry
*dst
, u32 mtu
)
1408 if (dst
->metrics
[RTAX_MTU
-1] > mtu
&& mtu
>= 68 &&
1409 !(dst_metric_locked(dst
, RTAX_MTU
))) {
1410 if (mtu
< ip_rt_min_pmtu
) {
1411 mtu
= ip_rt_min_pmtu
;
1412 dst
->metrics
[RTAX_LOCK
-1] |= (1 << RTAX_MTU
);
1414 dst
->metrics
[RTAX_MTU
-1] = mtu
;
1415 dst_set_expires(dst
, ip_rt_mtu_expires
);
1419 static struct dst_entry
*ipv4_dst_check(struct dst_entry
*dst
, u32 cookie
)
1424 static void ipv4_dst_destroy(struct dst_entry
*dst
)
1426 struct rtable
*rt
= (struct rtable
*) dst
;
1427 struct inet_peer
*peer
= rt
->peer
;
1428 struct in_device
*idev
= rt
->idev
;
1441 static void ipv4_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
1444 struct rtable
*rt
= (struct rtable
*) dst
;
1445 struct in_device
*idev
= rt
->idev
;
1446 if (dev
!= &loopback_dev
&& idev
&& idev
->dev
== dev
) {
1447 struct in_device
*loopback_idev
= in_dev_get(&loopback_dev
);
1448 if (loopback_idev
) {
1449 rt
->idev
= loopback_idev
;
1455 static void ipv4_link_failure(struct sk_buff
*skb
)
1459 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_HOST_UNREACH
, 0);
1461 rt
= (struct rtable
*) skb
->dst
;
1463 dst_set_expires(&rt
->u
.dst
, 0);
1466 static int ip_rt_bug(struct sk_buff
*skb
)
1468 printk(KERN_DEBUG
"ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
1469 NIPQUAD(skb
->nh
.iph
->saddr
), NIPQUAD(skb
->nh
.iph
->daddr
),
1470 skb
->dev
? skb
->dev
->name
: "?");
1476 We do not cache source address of outgoing interface,
1477 because it is used only by IP RR, TS and SRR options,
1478 so that it out of fast path.
1480 BTW remember: "addr" is allowed to be not aligned
1484 void ip_rt_get_source(u8
*addr
, struct rtable
*rt
)
1487 struct fib_result res
;
1489 if (rt
->fl
.iif
== 0)
1491 else if (fib_lookup(&rt
->fl
, &res
) == 0) {
1492 src
= FIB_RES_PREFSRC(res
);
1495 src
= inet_select_addr(rt
->u
.dst
.dev
, rt
->rt_gateway
,
1497 memcpy(addr
, &src
, 4);
1500 #ifdef CONFIG_NET_CLS_ROUTE
1501 static void set_class_tag(struct rtable
*rt
, u32 tag
)
1503 if (!(rt
->u
.dst
.tclassid
& 0xFFFF))
1504 rt
->u
.dst
.tclassid
|= tag
& 0xFFFF;
1505 if (!(rt
->u
.dst
.tclassid
& 0xFFFF0000))
1506 rt
->u
.dst
.tclassid
|= tag
& 0xFFFF0000;
1510 static void rt_set_nexthop(struct rtable
*rt
, struct fib_result
*res
, u32 itag
)
1512 struct fib_info
*fi
= res
->fi
;
1515 if (FIB_RES_GW(*res
) &&
1516 FIB_RES_NH(*res
).nh_scope
== RT_SCOPE_LINK
)
1517 rt
->rt_gateway
= FIB_RES_GW(*res
);
1518 memcpy(rt
->u
.dst
.metrics
, fi
->fib_metrics
,
1519 sizeof(rt
->u
.dst
.metrics
));
1520 if (fi
->fib_mtu
== 0) {
1521 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = rt
->u
.dst
.dev
->mtu
;
1522 if (rt
->u
.dst
.metrics
[RTAX_LOCK
-1] & (1 << RTAX_MTU
) &&
1523 rt
->rt_gateway
!= rt
->rt_dst
&&
1524 rt
->u
.dst
.dev
->mtu
> 576)
1525 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = 576;
1527 #ifdef CONFIG_NET_CLS_ROUTE
1528 rt
->u
.dst
.tclassid
= FIB_RES_NH(*res
).nh_tclassid
;
1531 rt
->u
.dst
.metrics
[RTAX_MTU
-1]= rt
->u
.dst
.dev
->mtu
;
1533 if (rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] == 0)
1534 rt
->u
.dst
.metrics
[RTAX_HOPLIMIT
-1] = sysctl_ip_default_ttl
;
1535 if (rt
->u
.dst
.metrics
[RTAX_MTU
-1] > IP_MAX_MTU
)
1536 rt
->u
.dst
.metrics
[RTAX_MTU
-1] = IP_MAX_MTU
;
1537 if (rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] == 0)
1538 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = max_t(unsigned int, rt
->u
.dst
.dev
->mtu
- 40,
1540 if (rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] > 65535 - 40)
1541 rt
->u
.dst
.metrics
[RTAX_ADVMSS
-1] = 65535 - 40;
1543 #ifdef CONFIG_NET_CLS_ROUTE
1544 #ifdef CONFIG_IP_MULTIPLE_TABLES
1545 set_class_tag(rt
, fib_rules_tclass(res
));
1547 set_class_tag(rt
, itag
);
1549 rt
->rt_type
= res
->type
;
1552 static int ip_route_input_mc(struct sk_buff
*skb
, u32 daddr
, u32 saddr
,
1553 u8 tos
, struct net_device
*dev
, int our
)
1558 struct in_device
*in_dev
= in_dev_get(dev
);
1561 /* Primary sanity checks. */
1566 if (MULTICAST(saddr
) || BADCLASS(saddr
) || LOOPBACK(saddr
) ||
1567 skb
->protocol
!= htons(ETH_P_IP
))
1570 if (ZERONET(saddr
)) {
1571 if (!LOCAL_MCAST(daddr
))
1573 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_LINK
);
1574 } else if (fib_validate_source(saddr
, 0, tos
, 0,
1575 dev
, &spec_dst
, &itag
) < 0)
1578 rth
= dst_alloc(&ipv4_dst_ops
);
1582 rth
->u
.dst
.output
= ip_rt_bug
;
1584 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
1585 rth
->u
.dst
.flags
= DST_HOST
;
1586 if (in_dev
->cnf
.no_policy
)
1587 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
1588 rth
->fl
.fl4_dst
= daddr
;
1589 rth
->rt_dst
= daddr
;
1590 rth
->fl
.fl4_tos
= tos
;
1591 #ifdef CONFIG_IP_ROUTE_FWMARK
1592 rth
->fl
.fl4_fwmark
= skb
->nfmark
;
1594 rth
->fl
.fl4_src
= saddr
;
1595 rth
->rt_src
= saddr
;
1596 #ifdef CONFIG_NET_CLS_ROUTE
1597 rth
->u
.dst
.tclassid
= itag
;
1600 rth
->fl
.iif
= dev
->ifindex
;
1601 rth
->u
.dst
.dev
= &loopback_dev
;
1602 dev_hold(rth
->u
.dst
.dev
);
1603 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
1605 rth
->rt_gateway
= daddr
;
1606 rth
->rt_spec_dst
= spec_dst
;
1607 rth
->rt_type
= RTN_MULTICAST
;
1608 rth
->rt_flags
= RTCF_MULTICAST
;
1610 rth
->u
.dst
.input
= ip_local_deliver
;
1611 rth
->rt_flags
|= RTCF_LOCAL
;
1614 #ifdef CONFIG_IP_MROUTE
1615 if (!LOCAL_MCAST(daddr
) && IN_DEV_MFORWARD(in_dev
))
1616 rth
->u
.dst
.input
= ip_mr_input
;
1618 RT_CACHE_STAT_INC(in_slow_mc
);
1621 hash
= rt_hash_code(daddr
, saddr
^ (dev
->ifindex
<< 5), tos
);
1622 return rt_intern_hash(hash
, rth
, (struct rtable
**) &skb
->dst
);
1634 static void ip_handle_martian_source(struct net_device
*dev
,
1635 struct in_device
*in_dev
,
1636 struct sk_buff
*skb
,
1640 RT_CACHE_STAT_INC(in_martian_src
);
1641 #ifdef CONFIG_IP_ROUTE_VERBOSE
1642 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit()) {
1644 * RFC1812 recommendation, if source is martian,
1645 * the only hint is MAC header.
1647 printk(KERN_WARNING
"martian source %u.%u.%u.%u from "
1648 "%u.%u.%u.%u, on dev %s\n",
1649 NIPQUAD(daddr
), NIPQUAD(saddr
), dev
->name
);
1650 if (dev
->hard_header_len
) {
1652 unsigned char *p
= skb
->mac
.raw
;
1653 printk(KERN_WARNING
"ll header: ");
1654 for (i
= 0; i
< dev
->hard_header_len
; i
++, p
++) {
1656 if (i
< (dev
->hard_header_len
- 1))
1665 static inline int __mkroute_input(struct sk_buff
*skb
,
1666 struct fib_result
* res
,
1667 struct in_device
*in_dev
,
1668 u32 daddr
, u32 saddr
, u32 tos
,
1669 struct rtable
**result
)
1674 struct in_device
*out_dev
;
1678 /* get a working reference to the output device */
1679 out_dev
= in_dev_get(FIB_RES_DEV(*res
));
1680 if (out_dev
== NULL
) {
1681 if (net_ratelimit())
1682 printk(KERN_CRIT
"Bug in ip_route_input" \
1683 "_slow(). Please, report\n");
1688 err
= fib_validate_source(saddr
, daddr
, tos
, FIB_RES_OIF(*res
),
1689 in_dev
->dev
, &spec_dst
, &itag
);
1691 ip_handle_martian_source(in_dev
->dev
, in_dev
, skb
, daddr
,
1699 flags
|= RTCF_DIRECTSRC
;
1701 if (out_dev
== in_dev
&& err
&& !(flags
& (RTCF_NAT
| RTCF_MASQ
)) &&
1702 (IN_DEV_SHARED_MEDIA(out_dev
) ||
1703 inet_addr_onlink(out_dev
, saddr
, FIB_RES_GW(*res
))))
1704 flags
|= RTCF_DOREDIRECT
;
1706 if (skb
->protocol
!= htons(ETH_P_IP
)) {
1707 /* Not IP (i.e. ARP). Do not create route, if it is
1708 * invalid for proxy arp. DNAT routes are always valid.
1710 if (out_dev
== in_dev
&& !(flags
& RTCF_DNAT
)) {
1717 rth
= dst_alloc(&ipv4_dst_ops
);
1723 rth
->u
.dst
.flags
= DST_HOST
;
1724 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
1725 if (res
->fi
->fib_nhs
> 1)
1726 rth
->u
.dst
.flags
|= DST_BALANCED
;
1728 if (in_dev
->cnf
.no_policy
)
1729 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
1730 if (in_dev
->cnf
.no_xfrm
)
1731 rth
->u
.dst
.flags
|= DST_NOXFRM
;
1732 rth
->fl
.fl4_dst
= daddr
;
1733 rth
->rt_dst
= daddr
;
1734 rth
->fl
.fl4_tos
= tos
;
1735 #ifdef CONFIG_IP_ROUTE_FWMARK
1736 rth
->fl
.fl4_fwmark
= skb
->nfmark
;
1738 rth
->fl
.fl4_src
= saddr
;
1739 rth
->rt_src
= saddr
;
1740 rth
->rt_gateway
= daddr
;
1742 rth
->fl
.iif
= in_dev
->dev
->ifindex
;
1743 rth
->u
.dst
.dev
= (out_dev
)->dev
;
1744 dev_hold(rth
->u
.dst
.dev
);
1745 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
1747 rth
->rt_spec_dst
= spec_dst
;
1749 rth
->u
.dst
.input
= ip_forward
;
1750 rth
->u
.dst
.output
= ip_output
;
1752 rt_set_nexthop(rth
, res
, itag
);
1754 rth
->rt_flags
= flags
;
1759 /* release the working reference to the output device */
1760 in_dev_put(out_dev
);
1764 static inline int ip_mkroute_input_def(struct sk_buff
*skb
,
1765 struct fib_result
* res
,
1766 const struct flowi
*fl
,
1767 struct in_device
*in_dev
,
1768 u32 daddr
, u32 saddr
, u32 tos
)
1774 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1775 if (res
->fi
&& res
->fi
->fib_nhs
> 1 && fl
->oif
== 0)
1776 fib_select_multipath(fl
, res
);
1779 /* create a routing cache entry */
1780 err
= __mkroute_input(skb
, res
, in_dev
, daddr
, saddr
, tos
, &rth
);
1783 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
1785 /* put it into the cache */
1786 hash
= rt_hash_code(daddr
, saddr
^ (fl
->iif
<< 5), tos
);
1787 return rt_intern_hash(hash
, rth
, (struct rtable
**)&skb
->dst
);
1790 static inline int ip_mkroute_input(struct sk_buff
*skb
,
1791 struct fib_result
* res
,
1792 const struct flowi
*fl
,
1793 struct in_device
*in_dev
,
1794 u32 daddr
, u32 saddr
, u32 tos
)
1796 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
1798 unsigned char hop
, hopcount
, lasthop
;
1803 hopcount
= res
->fi
->fib_nhs
;
1807 lasthop
= hopcount
- 1;
1809 /* distinguish between multipath and singlepath */
1811 return ip_mkroute_input_def(skb
, res
, fl
, in_dev
, daddr
,
1814 /* add all alternatives to the routing cache */
1815 for (hop
= 0; hop
< hopcount
; hop
++) {
1818 /* create a routing cache entry */
1819 err
= __mkroute_input(skb
, res
, in_dev
, daddr
, saddr
, tos
,
1824 /* put it into the cache */
1825 hash
= rt_hash_code(daddr
, saddr
^ (fl
->iif
<< 5), tos
);
1826 err
= rt_intern_hash(hash
, rth
, (struct rtable
**)&skb
->dst
);
1830 /* forward hop information to multipath impl. */
1831 multipath_set_nhinfo(rth
,
1832 FIB_RES_NETWORK(*res
),
1833 FIB_RES_NETMASK(*res
),
1837 /* only for the last hop the reference count is handled
1841 atomic_set(&(skb
->dst
->__refcnt
), 1);
1844 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
1845 return ip_mkroute_input_def(skb
, res
, fl
, in_dev
, daddr
, saddr
, tos
);
1846 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
1851 * NOTE. We drop all the packets that has local source
1852 * addresses, because every properly looped back packet
1853 * must have correct destination already attached by output routine.
1855 * Such approach solves two big problems:
1856 * 1. Not simplex devices are handled properly.
1857 * 2. IP spoofing attempts are filtered with 100% of guarantee.
1860 static int ip_route_input_slow(struct sk_buff
*skb
, u32 daddr
, u32 saddr
,
1861 u8 tos
, struct net_device
*dev
)
1863 struct fib_result res
;
1864 struct in_device
*in_dev
= in_dev_get(dev
);
1865 struct flowi fl
= { .nl_u
= { .ip4_u
=
1869 .scope
= RT_SCOPE_UNIVERSE
,
1870 #ifdef CONFIG_IP_ROUTE_FWMARK
1871 .fwmark
= skb
->nfmark
1874 .iif
= dev
->ifindex
};
1877 struct rtable
* rth
;
1883 /* IP on this device is disabled. */
1888 /* Check for the most weird martians, which can be not detected
1892 if (MULTICAST(saddr
) || BADCLASS(saddr
) || LOOPBACK(saddr
))
1893 goto martian_source
;
1895 if (daddr
== 0xFFFFFFFF || (saddr
== 0 && daddr
== 0))
1898 /* Accept zero addresses only to limited broadcast;
1899 * I even do not know to fix it or not. Waiting for complains :-)
1902 goto martian_source
;
1904 if (BADCLASS(daddr
) || ZERONET(daddr
) || LOOPBACK(daddr
))
1905 goto martian_destination
;
1908 * Now we are ready to route packet.
1910 if ((err
= fib_lookup(&fl
, &res
)) != 0) {
1911 if (!IN_DEV_FORWARD(in_dev
))
1917 RT_CACHE_STAT_INC(in_slow_tot
);
1919 if (res
.type
== RTN_BROADCAST
)
1922 if (res
.type
== RTN_LOCAL
) {
1924 result
= fib_validate_source(saddr
, daddr
, tos
,
1925 loopback_dev
.ifindex
,
1926 dev
, &spec_dst
, &itag
);
1928 goto martian_source
;
1930 flags
|= RTCF_DIRECTSRC
;
1935 if (!IN_DEV_FORWARD(in_dev
))
1937 if (res
.type
!= RTN_UNICAST
)
1938 goto martian_destination
;
1940 err
= ip_mkroute_input(skb
, &res
, &fl
, in_dev
, daddr
, saddr
, tos
);
1941 if (err
== -ENOBUFS
)
1953 if (skb
->protocol
!= htons(ETH_P_IP
))
1957 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_LINK
);
1959 err
= fib_validate_source(saddr
, 0, tos
, 0, dev
, &spec_dst
,
1962 goto martian_source
;
1964 flags
|= RTCF_DIRECTSRC
;
1966 flags
|= RTCF_BROADCAST
;
1967 res
.type
= RTN_BROADCAST
;
1968 RT_CACHE_STAT_INC(in_brd
);
1971 rth
= dst_alloc(&ipv4_dst_ops
);
1975 rth
->u
.dst
.output
= ip_rt_bug
;
1977 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
1978 rth
->u
.dst
.flags
= DST_HOST
;
1979 if (in_dev
->cnf
.no_policy
)
1980 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
1981 rth
->fl
.fl4_dst
= daddr
;
1982 rth
->rt_dst
= daddr
;
1983 rth
->fl
.fl4_tos
= tos
;
1984 #ifdef CONFIG_IP_ROUTE_FWMARK
1985 rth
->fl
.fl4_fwmark
= skb
->nfmark
;
1987 rth
->fl
.fl4_src
= saddr
;
1988 rth
->rt_src
= saddr
;
1989 #ifdef CONFIG_NET_CLS_ROUTE
1990 rth
->u
.dst
.tclassid
= itag
;
1993 rth
->fl
.iif
= dev
->ifindex
;
1994 rth
->u
.dst
.dev
= &loopback_dev
;
1995 dev_hold(rth
->u
.dst
.dev
);
1996 rth
->idev
= in_dev_get(rth
->u
.dst
.dev
);
1997 rth
->rt_gateway
= daddr
;
1998 rth
->rt_spec_dst
= spec_dst
;
1999 rth
->u
.dst
.input
= ip_local_deliver
;
2000 rth
->rt_flags
= flags
|RTCF_LOCAL
;
2001 if (res
.type
== RTN_UNREACHABLE
) {
2002 rth
->u
.dst
.input
= ip_error
;
2003 rth
->u
.dst
.error
= -err
;
2004 rth
->rt_flags
&= ~RTCF_LOCAL
;
2006 rth
->rt_type
= res
.type
;
2007 hash
= rt_hash_code(daddr
, saddr
^ (fl
.iif
<< 5), tos
);
2008 err
= rt_intern_hash(hash
, rth
, (struct rtable
**)&skb
->dst
);
2012 RT_CACHE_STAT_INC(in_no_route
);
2013 spec_dst
= inet_select_addr(dev
, 0, RT_SCOPE_UNIVERSE
);
2014 res
.type
= RTN_UNREACHABLE
;
2018 * Do not cache martian addresses: they should be logged (RFC1812)
2020 martian_destination
:
2021 RT_CACHE_STAT_INC(in_martian_dst
);
2022 #ifdef CONFIG_IP_ROUTE_VERBOSE
2023 if (IN_DEV_LOG_MARTIANS(in_dev
) && net_ratelimit())
2024 printk(KERN_WARNING
"martian destination %u.%u.%u.%u from "
2025 "%u.%u.%u.%u, dev %s\n",
2026 NIPQUAD(daddr
), NIPQUAD(saddr
), dev
->name
);
2037 ip_handle_martian_source(dev
, in_dev
, skb
, daddr
, saddr
);
2041 int ip_route_input(struct sk_buff
*skb
, u32 daddr
, u32 saddr
,
2042 u8 tos
, struct net_device
*dev
)
2044 struct rtable
* rth
;
2046 int iif
= dev
->ifindex
;
2048 tos
&= IPTOS_RT_MASK
;
2049 hash
= rt_hash_code(daddr
, saddr
^ (iif
<< 5), tos
);
2052 for (rth
= rcu_dereference(rt_hash_table
[hash
].chain
); rth
;
2053 rth
= rcu_dereference(rth
->u
.rt_next
)) {
2054 if (rth
->fl
.fl4_dst
== daddr
&&
2055 rth
->fl
.fl4_src
== saddr
&&
2056 rth
->fl
.iif
== iif
&&
2058 #ifdef CONFIG_IP_ROUTE_FWMARK
2059 rth
->fl
.fl4_fwmark
== skb
->nfmark
&&
2061 rth
->fl
.fl4_tos
== tos
) {
2062 rth
->u
.dst
.lastuse
= jiffies
;
2063 dst_hold(&rth
->u
.dst
);
2065 RT_CACHE_STAT_INC(in_hit
);
2067 skb
->dst
= (struct dst_entry
*)rth
;
2070 RT_CACHE_STAT_INC(in_hlist_search
);
2074 /* Multicast recognition logic is moved from route cache to here.
2075 The problem was that too many Ethernet cards have broken/missing
2076 hardware multicast filters :-( As result the host on multicasting
2077 network acquires a lot of useless route cache entries, sort of
2078 SDR messages from all the world. Now we try to get rid of them.
2079 Really, provided software IP multicast filter is organized
2080 reasonably (at least, hashed), it does not result in a slowdown
2081 comparing with route cache reject entries.
2082 Note, that multicast routers are not affected, because
2083 route cache entry is created eventually.
2085 if (MULTICAST(daddr
)) {
2086 struct in_device
*in_dev
;
2089 if ((in_dev
= __in_dev_get(dev
)) != NULL
) {
2090 int our
= ip_check_mc(in_dev
, daddr
, saddr
,
2091 skb
->nh
.iph
->protocol
);
2093 #ifdef CONFIG_IP_MROUTE
2094 || (!LOCAL_MCAST(daddr
) && IN_DEV_MFORWARD(in_dev
))
2098 return ip_route_input_mc(skb
, daddr
, saddr
,
2105 return ip_route_input_slow(skb
, daddr
, saddr
, tos
, dev
);
2108 static inline int __mkroute_output(struct rtable
**result
,
2109 struct fib_result
* res
,
2110 const struct flowi
*fl
,
2111 const struct flowi
*oldflp
,
2112 struct net_device
*dev_out
,
2116 struct in_device
*in_dev
;
2117 u32 tos
= RT_FL_TOS(oldflp
);
2120 if (LOOPBACK(fl
->fl4_src
) && !(dev_out
->flags
&IFF_LOOPBACK
))
2123 if (fl
->fl4_dst
== 0xFFFFFFFF)
2124 res
->type
= RTN_BROADCAST
;
2125 else if (MULTICAST(fl
->fl4_dst
))
2126 res
->type
= RTN_MULTICAST
;
2127 else if (BADCLASS(fl
->fl4_dst
) || ZERONET(fl
->fl4_dst
))
2130 if (dev_out
->flags
& IFF_LOOPBACK
)
2131 flags
|= RTCF_LOCAL
;
2133 /* get work reference to inet device */
2134 in_dev
= in_dev_get(dev_out
);
2138 if (res
->type
== RTN_BROADCAST
) {
2139 flags
|= RTCF_BROADCAST
| RTCF_LOCAL
;
2141 fib_info_put(res
->fi
);
2144 } else if (res
->type
== RTN_MULTICAST
) {
2145 flags
|= RTCF_MULTICAST
|RTCF_LOCAL
;
2146 if (!ip_check_mc(in_dev
, oldflp
->fl4_dst
, oldflp
->fl4_src
,
2148 flags
&= ~RTCF_LOCAL
;
2149 /* If multicast route do not exist use
2150 default one, but do not gateway in this case.
2153 if (res
->fi
&& res
->prefixlen
< 4) {
2154 fib_info_put(res
->fi
);
2160 rth
= dst_alloc(&ipv4_dst_ops
);
2166 rth
->u
.dst
.flags
= DST_HOST
;
2167 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2169 rth
->rt_multipath_alg
= res
->fi
->fib_mp_alg
;
2170 if (res
->fi
->fib_nhs
> 1)
2171 rth
->u
.dst
.flags
|= DST_BALANCED
;
2174 if (in_dev
->cnf
.no_xfrm
)
2175 rth
->u
.dst
.flags
|= DST_NOXFRM
;
2176 if (in_dev
->cnf
.no_policy
)
2177 rth
->u
.dst
.flags
|= DST_NOPOLICY
;
2179 rth
->fl
.fl4_dst
= oldflp
->fl4_dst
;
2180 rth
->fl
.fl4_tos
= tos
;
2181 rth
->fl
.fl4_src
= oldflp
->fl4_src
;
2182 rth
->fl
.oif
= oldflp
->oif
;
2183 #ifdef CONFIG_IP_ROUTE_FWMARK
2184 rth
->fl
.fl4_fwmark
= oldflp
->fl4_fwmark
;
2186 rth
->rt_dst
= fl
->fl4_dst
;
2187 rth
->rt_src
= fl
->fl4_src
;
2188 rth
->rt_iif
= oldflp
->oif
? : dev_out
->ifindex
;
2189 /* get references to the devices that are to be hold by the routing
2191 rth
->u
.dst
.dev
= dev_out
;
2193 rth
->idev
= in_dev_get(dev_out
);
2194 rth
->rt_gateway
= fl
->fl4_dst
;
2195 rth
->rt_spec_dst
= fl
->fl4_src
;
2197 rth
->u
.dst
.output
=ip_output
;
2199 RT_CACHE_STAT_INC(out_slow_tot
);
2201 if (flags
& RTCF_LOCAL
) {
2202 rth
->u
.dst
.input
= ip_local_deliver
;
2203 rth
->rt_spec_dst
= fl
->fl4_dst
;
2205 if (flags
& (RTCF_BROADCAST
| RTCF_MULTICAST
)) {
2206 rth
->rt_spec_dst
= fl
->fl4_src
;
2207 if (flags
& RTCF_LOCAL
&&
2208 !(dev_out
->flags
& IFF_LOOPBACK
)) {
2209 rth
->u
.dst
.output
= ip_mc_output
;
2210 RT_CACHE_STAT_INC(out_slow_mc
);
2212 #ifdef CONFIG_IP_MROUTE
2213 if (res
->type
== RTN_MULTICAST
) {
2214 if (IN_DEV_MFORWARD(in_dev
) &&
2215 !LOCAL_MCAST(oldflp
->fl4_dst
)) {
2216 rth
->u
.dst
.input
= ip_mr_input
;
2217 rth
->u
.dst
.output
= ip_mc_output
;
2223 rt_set_nexthop(rth
, res
, 0);
2225 rth
->rt_flags
= flags
;
2229 /* release work reference to inet device */
2235 static inline int ip_mkroute_output_def(struct rtable
**rp
,
2236 struct fib_result
* res
,
2237 const struct flowi
*fl
,
2238 const struct flowi
*oldflp
,
2239 struct net_device
*dev_out
,
2243 int err
= __mkroute_output(&rth
, res
, fl
, oldflp
, dev_out
, flags
);
2246 u32 tos
= RT_FL_TOS(oldflp
);
2248 atomic_set(&rth
->u
.dst
.__refcnt
, 1);
2250 hash
= rt_hash_code(oldflp
->fl4_dst
,
2251 oldflp
->fl4_src
^ (oldflp
->oif
<< 5), tos
);
2252 err
= rt_intern_hash(hash
, rth
, rp
);
2258 static inline int ip_mkroute_output(struct rtable
** rp
,
2259 struct fib_result
* res
,
2260 const struct flowi
*fl
,
2261 const struct flowi
*oldflp
,
2262 struct net_device
*dev_out
,
2265 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2266 u32 tos
= RT_FL_TOS(oldflp
);
2272 if (res
->fi
&& res
->fi
->fib_nhs
> 1) {
2273 unsigned char hopcount
= res
->fi
->fib_nhs
;
2275 for (hop
= 0; hop
< hopcount
; hop
++) {
2276 struct net_device
*dev2nexthop
;
2280 /* hold a work reference to the output device */
2281 dev2nexthop
= FIB_RES_DEV(*res
);
2282 dev_hold(dev2nexthop
);
2284 err
= __mkroute_output(&rth
, res
, fl
, oldflp
,
2285 dev2nexthop
, flags
);
2290 hash
= rt_hash_code(oldflp
->fl4_dst
,
2292 (oldflp
->oif
<< 5), tos
);
2293 err
= rt_intern_hash(hash
, rth
, rp
);
2295 /* forward hop information to multipath impl. */
2296 multipath_set_nhinfo(rth
,
2297 FIB_RES_NETWORK(*res
),
2298 FIB_RES_NETMASK(*res
),
2302 /* release work reference to output device */
2303 dev_put(dev2nexthop
);
2308 atomic_set(&(*rp
)->u
.dst
.__refcnt
, 1);
2311 return ip_mkroute_output_def(rp
, res
, fl
, oldflp
, dev_out
,
2314 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
2315 return ip_mkroute_output_def(rp
, res
, fl
, oldflp
, dev_out
, flags
);
2320 * Major route resolver routine.
2323 static int ip_route_output_slow(struct rtable
**rp
, const struct flowi
*oldflp
)
2325 u32 tos
= RT_FL_TOS(oldflp
);
2326 struct flowi fl
= { .nl_u
= { .ip4_u
=
2327 { .daddr
= oldflp
->fl4_dst
,
2328 .saddr
= oldflp
->fl4_src
,
2329 .tos
= tos
& IPTOS_RT_MASK
,
2330 .scope
= ((tos
& RTO_ONLINK
) ?
2333 #ifdef CONFIG_IP_ROUTE_FWMARK
2334 .fwmark
= oldflp
->fl4_fwmark
2337 .iif
= loopback_dev
.ifindex
,
2338 .oif
= oldflp
->oif
};
2339 struct fib_result res
;
2341 struct net_device
*dev_out
= NULL
;
2347 #ifdef CONFIG_IP_MULTIPLE_TABLES
2351 if (oldflp
->fl4_src
) {
2353 if (MULTICAST(oldflp
->fl4_src
) ||
2354 BADCLASS(oldflp
->fl4_src
) ||
2355 ZERONET(oldflp
->fl4_src
))
2358 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2359 dev_out
= ip_dev_find(oldflp
->fl4_src
);
2360 if (dev_out
== NULL
)
2363 /* I removed check for oif == dev_out->oif here.
2364 It was wrong for two reasons:
2365 1. ip_dev_find(saddr) can return wrong iface, if saddr is
2366 assigned to multiple interfaces.
2367 2. Moreover, we are allowed to send packets with saddr
2368 of another iface. --ANK
2371 if (oldflp
->oif
== 0
2372 && (MULTICAST(oldflp
->fl4_dst
) || oldflp
->fl4_dst
== 0xFFFFFFFF)) {
2373 /* Special hack: user can direct multicasts
2374 and limited broadcast via necessary interface
2375 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2376 This hack is not just for fun, it allows
2377 vic,vat and friends to work.
2378 They bind socket to loopback, set ttl to zero
2379 and expect that it will work.
2380 From the viewpoint of routing cache they are broken,
2381 because we are not allowed to build multicast path
2382 with loopback source addr (look, routing cache
2383 cannot know, that ttl is zero, so that packet
2384 will not leave this host and route is valid).
2385 Luckily, this hack is good workaround.
2388 fl
.oif
= dev_out
->ifindex
;
2398 dev_out
= dev_get_by_index(oldflp
->oif
);
2400 if (dev_out
== NULL
)
2402 if (__in_dev_get(dev_out
) == NULL
) {
2404 goto out
; /* Wrong error code */
2407 if (LOCAL_MCAST(oldflp
->fl4_dst
) || oldflp
->fl4_dst
== 0xFFFFFFFF) {
2409 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2414 if (MULTICAST(oldflp
->fl4_dst
))
2415 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2417 else if (!oldflp
->fl4_dst
)
2418 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2424 fl
.fl4_dst
= fl
.fl4_src
;
2426 fl
.fl4_dst
= fl
.fl4_src
= htonl(INADDR_LOOPBACK
);
2429 dev_out
= &loopback_dev
;
2431 fl
.oif
= loopback_dev
.ifindex
;
2432 res
.type
= RTN_LOCAL
;
2433 flags
|= RTCF_LOCAL
;
2437 if (fib_lookup(&fl
, &res
)) {
2440 /* Apparently, routing tables are wrong. Assume,
2441 that the destination is on link.
2444 Because we are allowed to send to iface
2445 even if it has NO routes and NO assigned
2446 addresses. When oif is specified, routing
2447 tables are looked up with only one purpose:
2448 to catch if destination is gatewayed, rather than
2449 direct. Moreover, if MSG_DONTROUTE is set,
2450 we send packet, ignoring both routing tables
2451 and ifaddr state. --ANK
2454 We could make it even if oif is unknown,
2455 likely IPv6, but we do not.
2458 if (fl
.fl4_src
== 0)
2459 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2461 res
.type
= RTN_UNICAST
;
2471 if (res
.type
== RTN_LOCAL
) {
2473 fl
.fl4_src
= fl
.fl4_dst
;
2476 dev_out
= &loopback_dev
;
2478 fl
.oif
= dev_out
->ifindex
;
2480 fib_info_put(res
.fi
);
2482 flags
|= RTCF_LOCAL
;
2486 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2487 if (res
.fi
->fib_nhs
> 1 && fl
.oif
== 0)
2488 fib_select_multipath(&fl
, &res
);
2491 if (!res
.prefixlen
&& res
.type
== RTN_UNICAST
&& !fl
.oif
)
2492 fib_select_default(&fl
, &res
);
2495 fl
.fl4_src
= FIB_RES_PREFSRC(res
);
2499 dev_out
= FIB_RES_DEV(res
);
2501 fl
.oif
= dev_out
->ifindex
;
2505 err
= ip_mkroute_output(rp
, &res
, &fl
, oldflp
, dev_out
, flags
);
2515 int __ip_route_output_key(struct rtable
**rp
, const struct flowi
*flp
)
2520 hash
= rt_hash_code(flp
->fl4_dst
, flp
->fl4_src
^ (flp
->oif
<< 5), flp
->fl4_tos
);
2523 for (rth
= rcu_dereference(rt_hash_table
[hash
].chain
); rth
;
2524 rth
= rcu_dereference(rth
->u
.rt_next
)) {
2525 if (rth
->fl
.fl4_dst
== flp
->fl4_dst
&&
2526 rth
->fl
.fl4_src
== flp
->fl4_src
&&
2528 rth
->fl
.oif
== flp
->oif
&&
2529 #ifdef CONFIG_IP_ROUTE_FWMARK
2530 rth
->fl
.fl4_fwmark
== flp
->fl4_fwmark
&&
2532 !((rth
->fl
.fl4_tos
^ flp
->fl4_tos
) &
2533 (IPTOS_RT_MASK
| RTO_ONLINK
))) {
2535 			/* check for multipath routes and choose one if necessary */
2538 			if (multipath_select_route(flp, rth, rp)) {
2539 				dst_hold(&(*rp)->u.dst);
2540 				RT_CACHE_STAT_INC(out_hit);
2541 				rcu_read_unlock_bh();
2545 			rth->u.dst.lastuse = jiffies;
2546 			dst_hold(&rth->u.dst);
2548 			RT_CACHE_STAT_INC(out_hit);
2549 			rcu_read_unlock_bh();
2553 		RT_CACHE_STAT_INC(out_hlist_search);
2555 	rcu_read_unlock_bh();
2557 	return ip_route_output_slow(rp, flp);
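/*
 * ip_route_output_flow(): resolve the route via the cache/slow path above;
 * for flows that carry a protocol, fill any still-unspecified saddr/daddr
 * from the result and hand the dst to xfrm_lookup() so an IPsec policy can
 * wrap it when one applies.
 */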
2560 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2564 	if ((err = __ip_route_output_key(rp, flp)) != 0)
2569 		flp->fl4_src = (*rp)->rt_src;
2571 		flp->fl4_dst = (*rp)->rt_dst;
2572 		return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
2578 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2580 	return ip_route_output_flow(rp, flp, NULL, 0);
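/*
 * Typical in-kernel use of the exported lookup (sketch only; "daddr" and
 * "tos" stand for whatever the caller already has, error handling omitted):
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
 *						 .tos   = RT_TOS(tos) } } };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(&rt, &fl) == 0) {
 *		... use rt->u.dst / rt->rt_gateway ...
 *		ip_rt_put(rt);
 *	}
 */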
2583 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int nowait)
2586 	struct rtable *rt = (struct rtable *)skb->dst;
2588 	struct nlmsghdr *nlh;
2589 	unsigned char *b = skb->tail;
2590 	struct rta_cacheinfo ci;
2591 #ifdef CONFIG_IP_MROUTE
2592 	struct rtattr *eptr;
2594 	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
2595 	r = NLMSG_DATA(nlh);
2596 	nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
2597 	r->rtm_family	 = AF_INET;
2598 	r->rtm_dst_len	= 32;
2600 	r->rtm_tos	= rt->fl.fl4_tos;
2601 	r->rtm_table	= RT_TABLE_MAIN;
2602 	r->rtm_type	= rt->rt_type;
2603 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
2604 	r->rtm_protocol = RTPROT_UNSPEC;
2605 	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2606 	if (rt->rt_flags & RTCF_NOTIFY)
2607 		r->rtm_flags |= RTM_F_NOTIFY;
2608 	RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
2609 	if (rt->fl.fl4_src) {
2610 		r->rtm_src_len = 32;
2611 		RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
2614 		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
2615 #ifdef CONFIG_NET_CLS_ROUTE
2616 	if (rt->u.dst.tclassid)
2617 		RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
2619 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2620 	if (rt->rt_multipath_alg != IP_MP_ALG_NONE) {
2621 		__u32 alg = rt->rt_multipath_alg;
2623 		RTA_PUT(skb, RTA_MP_ALGO, 4, &alg);
2627 		RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
2628 	else if (rt->rt_src != rt->fl.fl4_src)
2629 		RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
2630 	if (rt->rt_dst != rt->rt_gateway)
2631 		RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
2632 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2633 		goto rtattr_failure;
2634 	ci.rta_lastuse	= jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
2635 	ci.rta_used	= rt->u.dst.__use;
2636 	ci.rta_clntref	= atomic_read(&rt->u.dst.__refcnt);
2637 	if (rt->u.dst.expires)
2638 		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
2641 	ci.rta_error	= rt->u.dst.error;
2642 	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
2644 		ci.rta_id = rt->peer->ip_id_count;
2645 		if (rt->peer->tcp_ts_stamp) {
2646 			ci.rta_ts = rt->peer->tcp_ts;
2647 			ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
2650 #ifdef CONFIG_IP_MROUTE
2651 	eptr = (struct rtattr *)skb->tail;
2653 	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
2655 #ifdef CONFIG_IP_MROUTE
2656 		u32 dst = rt->rt_dst;
2658 		if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
2659 		    ipv4_devconf.mc_forwarding) {
2660 			int err = ipmr_get_route(skb, r, nowait);
2667 				if (err == -EMSGSIZE)
2669 				((struct rta_cacheinfo *)RTA_DATA(eptr))->rta_error = err;
2674 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
2677 	nlh->nlmsg_len = skb->tail - b;
2682 	skb_trim(skb, b - skb->data);
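/*
 * RTM_GETROUTE handler: build a scratch skb, resolve it through either the
 * input path (when RTA_IIF is supplied) or the output path, and unicast the
 * resulting cache entry back to the requester via rt_fill_info().
 */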
2686 int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2688 	struct rtattr **rta = arg;
2689 	struct rtmsg *rtm = NLMSG_DATA(nlh);
2690 	struct rtable *rt = NULL;
2695 	struct sk_buff *skb;
2697 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2701 /* Reserve room for dummy headers, this skb can pass
2702 through good chunk of routing engine.
2704 	skb->mac.raw = skb->data;
2705 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2707 	if (rta[RTA_SRC - 1])
2708 		memcpy(&src, RTA_DATA(rta[RTA_SRC - 1]), 4);
2709 	if (rta[RTA_DST - 1])
2710 		memcpy(&dst, RTA_DATA(rta[RTA_DST - 1]), 4);
2711 	if (rta[RTA_IIF - 1])
2712 		memcpy(&iif, RTA_DATA(rta[RTA_IIF - 1]), sizeof(int));
2715 		struct net_device *dev = __dev_get_by_index(iif);
2719 		skb->protocol = htons(ETH_P_IP);
2722 		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2724 		rt = (struct rtable *)skb->dst;
2725 		if (!err && rt->u.dst.error)
2726 			err = -rt->u.dst.error;
2728 		struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
2730 							 .tos = rtm->rtm_tos } } };
2732 		if (rta[RTA_OIF - 1])
2733 			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
2735 		err = ip_route_output_key(&rt, &fl);
2740 	skb->dst = &rt->u.dst;
2741 	if (rtm->rtm_flags & RTM_F_NOTIFY)
2742 		rt->rt_flags |= RTCF_NOTIFY;
2744 	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
2746 	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2747 			   RTM_NEWROUTE, 0);
2755 	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
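/*
 * Dump the entire routing cache for a NETLINK_ROUTE dump request; the hash
 * bucket and chain position are saved in cb->args[] so a large cache can be
 * returned across several callbacks.
 */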
2765 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2772 	s_idx = idx = cb->args[1];
2773 	for (h = 0; h <= rt_hash_mask; h++) {
2774 		if (h < s_h) continue;
2778 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2779 		     rt = rcu_dereference(rt->u.rt_next), idx++) {
2782 			skb->dst = dst_clone(&rt->u.dst);
2783 			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2784 					 cb->nlh->nlmsg_seq,
2785 					 RTM_NEWROUTE, 1) <= 0) {
2786 				dst_release(xchg(&skb->dst, NULL));
2787 				rcu_read_unlock_bh();
2790 			dst_release(xchg(&skb->dst, NULL));
2792 		rcu_read_unlock_bh();
2801 void ip_rt_multicast_event(struct in_device *in_dev)
2806 #ifdef CONFIG_SYSCTL
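/*
 * Writing a value to /proc/sys/net/ipv4/route/flush stores it in flush_delay
 * via proc_dointvec() and immediately calls rt_cache_flush() with that delay.
 */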
2807 static int flush_delay;
2809 static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2810 				      struct file *filp, void __user *buffer,
2811 				      size_t *lenp, loff_t *ppos)
2814 		proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2815 		rt_cache_flush(flush_delay);
2822 static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2825 					      void __user *oldval,
2826 					      size_t __user *oldlenp,
2827 					      void __user *newval,
2832 	if (newlen != sizeof(int))
2834 	if (get_user(delay, (int __user *)newval))
2836 	rt_cache_flush(delay);
2840 ctl_table ipv4_route_table[] = {
2842 		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
2843 		.procname	= "flush",
2844 		.data		= &flush_delay,
2845 		.maxlen		= sizeof(int),
2847 		.proc_handler	= &ipv4_sysctl_rtcache_flush,
2848 		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
2851 		.ctl_name	= NET_IPV4_ROUTE_MIN_DELAY,
2852 		.procname	= "min_delay",
2853 		.data		= &ip_rt_min_delay,
2854 		.maxlen		= sizeof(int),
2856 		.proc_handler	= &proc_dointvec_jiffies,
2857 		.strategy	= &sysctl_jiffies,
2860 		.ctl_name	= NET_IPV4_ROUTE_MAX_DELAY,
2861 		.procname	= "max_delay",
2862 		.data		= &ip_rt_max_delay,
2863 		.maxlen		= sizeof(int),
2865 		.proc_handler	= &proc_dointvec_jiffies,
2866 		.strategy	= &sysctl_jiffies,
2869 		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
2870 		.procname	= "gc_thresh",
2871 		.data		= &ipv4_dst_ops.gc_thresh,
2872 		.maxlen		= sizeof(int),
2874 		.proc_handler	= &proc_dointvec,
2877 		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
2878 		.procname	= "max_size",
2879 		.data		= &ip_rt_max_size,
2880 		.maxlen		= sizeof(int),
2882 		.proc_handler	= &proc_dointvec,
2885 		/* Deprecated. Use gc_min_interval_ms */
2887 		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
2888 		.procname	= "gc_min_interval",
2889 		.data		= &ip_rt_gc_min_interval,
2890 		.maxlen		= sizeof(int),
2892 		.proc_handler	= &proc_dointvec_jiffies,
2893 		.strategy	= &sysctl_jiffies,
2896 		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
2897 		.procname	= "gc_min_interval_ms",
2898 		.data		= &ip_rt_gc_min_interval,
2899 		.maxlen		= sizeof(int),
2901 		.proc_handler	= &proc_dointvec_ms_jiffies,
2902 		.strategy	= &sysctl_ms_jiffies,
2905 		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
2906 		.procname	= "gc_timeout",
2907 		.data		= &ip_rt_gc_timeout,
2908 		.maxlen		= sizeof(int),
2910 		.proc_handler	= &proc_dointvec_jiffies,
2911 		.strategy	= &sysctl_jiffies,
2914 		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
2915 		.procname	= "gc_interval",
2916 		.data		= &ip_rt_gc_interval,
2917 		.maxlen		= sizeof(int),
2919 		.proc_handler	= &proc_dointvec_jiffies,
2920 		.strategy	= &sysctl_jiffies,
2923 		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
2924 		.procname	= "redirect_load",
2925 		.data		= &ip_rt_redirect_load,
2926 		.maxlen		= sizeof(int),
2928 		.proc_handler	= &proc_dointvec,
2931 		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
2932 		.procname	= "redirect_number",
2933 		.data		= &ip_rt_redirect_number,
2934 		.maxlen		= sizeof(int),
2936 		.proc_handler	= &proc_dointvec,
2939 		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
2940 		.procname	= "redirect_silence",
2941 		.data		= &ip_rt_redirect_silence,
2942 		.maxlen		= sizeof(int),
2944 		.proc_handler	= &proc_dointvec,
2947 		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
2948 		.procname	= "error_cost",
2949 		.data		= &ip_rt_error_cost,
2950 		.maxlen		= sizeof(int),
2952 		.proc_handler	= &proc_dointvec,
2955 		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
2956 		.procname	= "error_burst",
2957 		.data		= &ip_rt_error_burst,
2958 		.maxlen		= sizeof(int),
2960 		.proc_handler	= &proc_dointvec,
2963 		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
2964 		.procname	= "gc_elasticity",
2965 		.data		= &ip_rt_gc_elasticity,
2966 		.maxlen		= sizeof(int),
2968 		.proc_handler	= &proc_dointvec,
2971 		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
2972 		.procname	= "mtu_expires",
2973 		.data		= &ip_rt_mtu_expires,
2974 		.maxlen		= sizeof(int),
2976 		.proc_handler	= &proc_dointvec_jiffies,
2977 		.strategy	= &sysctl_jiffies,
2980 		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
2981 		.procname	= "min_pmtu",
2982 		.data		= &ip_rt_min_pmtu,
2983 		.maxlen		= sizeof(int),
2985 		.proc_handler	= &proc_dointvec,
2988 		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
2989 		.procname	= "min_adv_mss",
2990 		.data		= &ip_rt_min_advmss,
2991 		.maxlen		= sizeof(int),
2993 		.proc_handler	= &proc_dointvec,
2996 		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
2997 		.procname	= "secret_interval",
2998 		.data		= &ip_rt_secret_interval,
2999 		.maxlen		= sizeof(int),
3001 		.proc_handler	= &proc_dointvec_jiffies,
3002 		.strategy	= &sysctl_jiffies,
3008 #ifdef CONFIG_NET_CLS_ROUTE
3009 struct ip_rt_acct *ip_rt_acct;
3011 /* This code sucks. But you should have seen it before! --RR */
3013 /* IP route accounting ptr for this logical cpu number. */
3014 #define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
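/*
 * Layout assumed by IP_RT_ACCT_CPU(): ip_rt_acct holds NR_CPUS consecutive
 * blocks of 256 struct ip_rt_acct counters, one block per CPU, indexed by
 * the route realm on the accounting fast path.
 */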
3016 #ifdef CONFIG_PROC_FS
3017 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
3018 			   int length, int *eof, void *data)
3022 	if ((offset & 3) || (length & 3))
3025 	if (offset >= sizeof(struct ip_rt_acct) * 256) {
3030 	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
3031 		length = sizeof(struct ip_rt_acct) * 256 - offset;
3035 	offset /= sizeof(u32);
3038 		u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
3039 		u32 *dst = (u32 *) buffer;
3041 /* Copy first cpu. */
3043 		memcpy(dst, src, length);
3045 /* Add the other cpus in, one int at a time */
3049 			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
3051 			for (j = 0; j < length/4; j++)
3057 #endif /* CONFIG_PROC_FS */
3058 #endif /* CONFIG_NET_CLS_ROUTE */
3060 static __initdata unsigned long rhash_entries;
3061 static int __init set_rhash_entries(char *str)
3065 	rhash_entries = simple_strtoul(str, &str, 0);
3068 __setup("rhash_entries=", set_rhash_entries);
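/*
 * ip_rt_init(): size the routing cache hash from available memory (or from
 * the rhash_entries= boot parameter), round the bucket count to a power of
 * two, then start the flush/expire/secret-rebuild timers with a random skew
 * so they do not all fire at the same time.
 */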
3070 int __init ip_rt_init(void)
3072 	int i, order, goal, rc = 0;
3074 	rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
3075 			     (jiffies ^ (jiffies >> 7)));
3077 #ifdef CONFIG_NET_CLS_ROUTE
3079 	     (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
3081 	ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
3083 panic("IP: failed to allocate ip_rt_acct\n");
3084 	memset(ip_rt_acct, 0, PAGE_SIZE << order);
3087 	ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
3088 						     sizeof(struct rtable),
3089 						     0, SLAB_HWCACHE_ALIGN, NULL, NULL);
3092 	if (!ipv4_dst_ops.kmem_cachep)
3093 panic("IP: failed to allocate ip_dst_cache\n");
3095 	goal = num_physpages >> (26 - PAGE_SHIFT);
3097 		goal = (rhash_entries * sizeof(struct rt_hash_bucket)) >> PAGE_SHIFT;
3098 	for (order = 0; (1UL << order) < goal; order++)
3102 		rt_hash_mask = (1UL << order) * PAGE_SIZE /
3103 			       sizeof(struct rt_hash_bucket);
3104 		while (rt_hash_mask & (rt_hash_mask - 1))
3106 		rt_hash_table = (struct rt_hash_bucket *)
3107 			__get_free_pages(GFP_ATOMIC, order);
3108 	} while (rt_hash_table == NULL && --order > 0);
3111 panic("Failed to allocate IP route cache hash table\n");
3113 	printk(KERN_INFO "IP: routing cache hash table of %u buckets, %ldKbytes\n",
3114 	       rt_hash_mask,
3115 	       (long) (rt_hash_mask * sizeof(struct rt_hash_bucket)) / 1024);
3117 	for (rt_hash_log = 0; (1 << rt_hash_log) != rt_hash_mask; rt_hash_log++)
3121 	for (i = 0; i <= rt_hash_mask; i++) {
3122 		spin_lock_init(&rt_hash_table[i].lock);
3123 		rt_hash_table[i].chain = NULL;
3126 	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3127 	ip_rt_max_size = (rt_hash_mask + 1) * 16;
3129 	rt_cache_stat = alloc_percpu(struct rt_cache_stat);
3136 	init_timer(&rt_flush_timer);
3137 	rt_flush_timer.function = rt_run_flush;
3138 	init_timer(&rt_periodic_timer);
3139 	rt_periodic_timer.function = rt_check_expire;
3140 	init_timer(&rt_secret_timer);
3141 	rt_secret_timer.function = rt_secret_rebuild;
3143 	/* All the timers, started at system startup, tend
3144 	   to synchronize. Perturb it a bit. */
3146 	rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
3147 					ip_rt_gc_interval;
3148 	add_timer(&rt_periodic_timer);
3150 	rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
3151 					ip_rt_secret_interval;
3152 	add_timer(&rt_secret_timer);
3154 #ifdef CONFIG_PROC_FS
3156 	struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
3157 	if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
3158 	    !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
3159 					     proc_net_stat))) {
3160 		free_percpu(rt_cache_stat);
3163 	rtstat_pde->proc_fops = &rt_cpu_seq_fops;
3165 #ifdef CONFIG_NET_CLS_ROUTE
3166 	create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL);
3176 EXPORT_SYMBOL(__ip_select_ident);
3177 EXPORT_SYMBOL(ip_route_input);
3178 EXPORT_SYMBOL(ip_route_output_key);