/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP router.
 *
 * Version:	@(#)route.h	1.0.4	05/27/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		Alan Cox	:	Reformatted. Added ip_rt_local()
 *		Alan Cox	:	Support for TCP parameters.
 *		Alexey Kuznetsov:	Major changes for new routing code.
 *		Mike McLagan	:	Routing by source
 *		Robert Olsson	:	Added rt_cache statistics
 */
#ifndef _ROUTE_H
#define _ROUTE_H

#include <net/dst.h>
#include <net/inetpeer.h>
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/route.h>
#include <linux/ip.h>
#include <linux/cache.h>
#include <linux/security.h>

/* IPv4 datagram length is stored into 16bit field (tot_len) */
#define IP_MAX_MTU	0xFFFFU

#define RTO_ONLINK	0x01

#define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
#define RT_CONN_FLAGS_TOS(sk,tos)   (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))

struct fib_nh;
struct fib_info;
struct uncached_list;
struct rtable {
	struct dst_entry	dst;

	int			rt_genid;
	unsigned int		rt_flags;
	__u16			rt_type;
	__u8			rt_is_input;
	__u8			rt_uses_gateway;

	int			rt_iif;

	u8			rt_gw_family;
	/* Info on neighbour */
	union {
		__be32		rt_gw4;
		struct in6_addr	rt_gw6;
	};

	/* Miscellaneous cached information */
	u32			rt_mtu_locked:1,
				rt_pmtu:31;

	struct list_head	rt_uncached;
	struct uncached_list	*rt_uncached_list;
};

static inline bool rt_is_input_route(const struct rtable *rt)
{
	return rt->rt_is_input != 0;
}

static inline bool rt_is_output_route(const struct rtable *rt)
{
	return rt->rt_is_input == 0;
}

static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
{
	if (rt->rt_gw_family == AF_INET)
		return rt->rt_gw4;
	return daddr;
}

struct ip_rt_acct {
	__u32	o_bytes;
	__u32	o_packets;
	__u32	i_bytes;
	__u32	i_packets;
};

struct rt_cache_stat {
	unsigned int in_slow_tot;
	unsigned int in_slow_mc;
	unsigned int in_no_route;
	unsigned int in_brd;
	unsigned int in_martian_dst;
	unsigned int in_martian_src;
	unsigned int out_slow_tot;
	unsigned int out_slow_mc;
};

extern struct ip_rt_acct __percpu *ip_rt_acct;

struct in_device;

int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
					const struct sk_buff *skb);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
					    struct fib_result *res,
					    const struct sk_buff *skb);

static inline struct rtable *__ip_route_output_key(struct net *net,
						   struct flowi4 *flp)
{
	return ip_route_output_key_hash(net, flp, NULL);
}

struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
				    const struct sock *sk);
struct dst_entry *ipv4_blackhole_route(struct net *net,
				       struct dst_entry *dst_orig);

static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
{
	return ip_route_output_flow(net, flp, NULL);
}

static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
					     __be32 saddr, u8 tos, int oif)
{
	struct flowi4 fl4 = {
		.flowi4_oif = oif,
		.flowi4_tos = tos,
		.daddr = daddr,
		.saddr = saddr,
	};
	return ip_route_output_key(net, &fl4);
}

static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
						   struct sock *sk,
						   __be32 daddr, __be32 saddr,
						   __be16 dport, __be16 sport,
						   __u8 proto, __u8 tos, int oif)
{
	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
			   RT_SCOPE_UNIVERSE, proto,
			   sk ? inet_sk_flowi_flags(sk) : 0,
			   daddr, saddr, dport, sport, sock_net_uid(net, sk));
	if (sk)
		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
	return ip_route_output_flow(net, fl4, sk);
}

static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
						 __be32 daddr, __be32 saddr,
						 __be32 gre_key, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = IPPROTO_GRE;
	fl4->fl4_gre_key = gre_key;
	return ip_route_output_key(net, fl4);
}
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag);
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
			 u8 tos, struct net_device *devin);
int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
		       u8 tos, struct net_device *devin,
		       struct fib_result *res);

int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
		      u8 tos, struct net_device *devin,
		      const struct sk_buff *hint);

static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
				 u8 tos, struct net_device *devin)
{
	int err;

	rcu_read_lock();
	err = ip_route_input_noref(skb, dst, src, tos, devin);
	if (!err) {
		skb_dst_force(skb);
		if (!skb_dst(skb))
			err = -EINVAL;
	}
	rcu_read_unlock();

	return err;
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
		      u8 protocol);
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);

unsigned int inet_addr_type(struct net *net, __be32 addr);
unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
				__be32 addr);
unsigned int inet_addr_type_dev_table(struct net *net,
				      const struct net_device *dev,
				      __be32 addr);
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);

struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric);

void rt_add_uncached_list(struct rtable *rt);
void rt_del_uncached_list(struct rtable *rt);

int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags);

static inline void ip_rt_put(struct rtable *rt)
{
	/* dst_release() accepts a NULL parameter.
	 * We rely on dst being first structure in struct rtable
	 */
	BUILD_BUG_ON(offsetof(struct rtable, dst) != 0);
	dst_release(&rt->dst);
}
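
/*
 * Typical lifetime pairing (illustrative sketch only, not part of this
 * header; the surrounding fl4/net variables are assumed):
 *
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (!IS_ERR(rt)) {
 *		... use rt->dst ...
 *		ip_rt_put(rt);
 *	}
 *
 * Passing NULL is also safe: with dst at offset 0, &rt->dst is NULL
 * whenever rt is NULL, so the call degenerates to dst_release(NULL).
 */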

#define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)

extern const __u8 ip_tos2prio[16];

static inline char rt_tos2priority(u8 tos)
{
	return ip_tos2prio[IPTOS_TOS(tos)>>1];
}

/* ip_route_connect() and ip_route_newports() work in tandem whilst
 * binding a socket for a new outgoing connection.
 *
 * In order to use IPSEC properly, we must, in the end, have a
 * route that was looked up using all available keys including source
 * and destination ports.
 *
 * However, if a source port needs to be allocated (the user specified
 * a wildcard source port) we need to obtain addressing information
 * in order to perform that allocation.
 *
 * So ip_route_connect() looks up a route using wildcarded source and
 * destination ports in the key, simply so that we can get a pair of
 * addresses to use for port allocation.
 *
 * Later, once the ports are allocated, ip_route_newports() will make
 * another route lookup if needed to make sure we catch any IPSEC
 * rules keyed on the port information.
 *
 * The callers allocate the flow key on their stack, and must pass in
 * the same flowi4 object to both the ip_route_connect() and the
 * ip_route_newports() calls.
 */
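
/*
 * A minimal sketch of that tandem, loosely modelled on a TCP-style connect
 * path (the local variables and the use of IPPROTO_TCP are assumptions for
 * illustration, not taken from this header):
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	// First lookup: the source port may still be a wildcard (0).
 *	rt = ip_route_connect(&fl4, daddr, saddr, RT_CONN_FLAGS(sk),
 *			      sk->sk_bound_dev_if, IPPROTO_TCP,
 *			      orig_sport, orig_dport, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 *	// ... allocate the local port, yielding the final sport ...
 *
 *	// Redo the lookup with the final ports so IPSEC rules keyed on
 *	// port information are honoured; reuses the same fl4.
 *	rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
 *			       sport, dport, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */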

static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 src,
					 u32 tos, int oif, u8 protocol,
					 __be16 sport, __be16 dport,
					 struct sock *sk)
{
	__u8 flow_flags = 0;

	if (inet_sk(sk)->transparent)
		flow_flags |= FLOWI_FLAG_ANYSRC;

	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
			   protocol, flow_flags, dst, src, dport, sport,
			   sk->sk_uid);
}

static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
					      __be32 dst, __be32 src, u32 tos,
					      int oif, u8 protocol,
					      __be16 sport, __be16 dport,
					      struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct rtable *rt;

	ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
			      sport, dport, sk);

	if (!dst || !src) {
		rt = __ip_route_output_key(net, fl4);
		if (IS_ERR(rt))
			return rt;
		ip_rt_put(rt);
		flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
	}
	security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
	return ip_route_output_flow(net, fl4, sk);
}

static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
					       __be16 orig_sport, __be16 orig_dport,
					       __be16 sport, __be16 dport,
					       struct sock *sk)
{
	if (sport != orig_sport || dport != orig_dport) {
		fl4->fl4_dport = dport;
		fl4->fl4_sport = sport;
		ip_rt_put(rt);
		flowi4_update_output(fl4, sk->sk_bound_dev_if,
				     RT_CONN_FLAGS(sk), fl4->daddr,
				     fl4->saddr);
		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		return ip_route_output_flow(sock_net(sk), fl4, sk);
	}
	return rt;
}

static inline int inet_iif(const struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt->rt_iif)
		return rt->rt_iif;

	return skb->skb_iif;
}

static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
	struct net *net = dev_net(dst->dev);

	if (hoplimit == 0)
		hoplimit = net->ipv4.sysctl_ip_default_ttl;
	return hoplimit;
}

static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
					     __be32 daddr)
{
	struct neighbour *neigh;

	neigh = __ipv4_neigh_lookup_noref(dev, daddr);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &daddr, dev, false);

	return neigh;
}

static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
						struct sk_buff *skb,
						bool *is_v6gw)
{
	struct net_device *dev = rt->dst.dev;
	struct neighbour *neigh;

	if (likely(rt->rt_gw_family == AF_INET)) {
		neigh = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
		*is_v6gw = true;
	} else {
		neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
	}
	return neigh;
}

#endif /* _ROUTE_H */