/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */
extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;
struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};
static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}
static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}
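/* Typical use (illustrative sketch, not a prescription): a sendmsg()
 * handler seeds the cookie from socket defaults, then lets control
 * messages override individual fields, roughly:
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen)
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 */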
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}
/* Special input handler for packets caught by the router alert option.
 * They are selected only by the protocol field and are then processed
 * much like locally destined packets, but only if someone wants them!
 * Otherwise a router that is not running rsvpd would kill RSVP.
 *
 * What user level does with them is a user-level problem.  I have no idea
 * how it will masquerade or NAT them (it is a joke, joke :-)), but the
 * receiver should be clever enough, e.g., to forward mtrace requests sent
 * to a multicast group so that they reach the destination's designated
 * router.
 */
struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

int igmp_mc_init(void);
/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
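/* Typical fast-path usage (sketch, modelled on ip_do_fragment() in
 * net/ipv4/ip_output.c; error handling and refcounting elided):
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */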
struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};
void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);
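/* Slow-path counterpart (sketch, again following ip_do_fragment(); the
 * names "skb2", "df" and "output" are illustrative):
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */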
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);
struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);
#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif
#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
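/* Typical use (sketch): a /proc seq_file handler folds one MIB into a local
 * buffer before printing, along the lines of net/ipv4/proc.c (the stats list
 * name below is illustrative):
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = {0};
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */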
void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < net->ipv4.sysctl_ip_prot_sock;
}
#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif
__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}
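/* Informative note on the arithmetic above: decrementing TTL lowers the
 * 16-bit header word holding TTL/protocol by 0x0100, so the one's-complement
 * checksum field must be raised by the same amount.  Adding htons(0x0100)
 * keeps the value in network byte order, and "+ (check >= 0xFFFF)" folds the
 * carry back in (RFC 1141/1624 style incremental update); handling the carry
 * this way is what avoids the check == 0xFEFF corner case mentioned above.
 */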
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}
static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}
static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}
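/* The ordered "<" comparisons in the helpers above rely on the numeric
 * ordering of the IP_PMTUDISC_* values in <uapi/linux/in.h>:
 * DONT (0) < WANT (1) < DO (2) < PROBE (3) < INTERFACE (4) < OMIT (5).
 * For example, ip_sk_use_pmtu() is true exactly for DONT, WANT and DO.
 */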
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	/* 'forwarding = true' case should always honour route mtu */
	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}
/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}
static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}
static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}
/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
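/* Worked example (RFC 1112 mapping): 224.1.2.3 has low 23 bits 0x010203, so
 * the resulting MAC is 01:00:5e:01:02:03.  Since only 23 of the 28 group-id
 * bits survive, 32 different IPv4 multicast groups share each multicast MAC.
 */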
/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}
static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}
static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr, unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */
enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};
/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bond,
					     enum ip_defrag_users upper_bond)
{
	return user >= lower_bond && user <= upper_bond;
}
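/* Example: the conntrack identities above each span USHRT_MAX values so that
 * a per-zone id can be folded into the user value, so a check like
 *
 *	ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				  __IP_DEFRAG_CONNTRACK_IN_END)
 *
 * matches any per-zone conntrack-input user (illustrative use, not a
 * specific call site).
 */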
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif
/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);
/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}
void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}
bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif
int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);