/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
}
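
/*
 * Example (illustrative only, not part of this header): a typical use of the
 * two helpers above is a masked address comparison, e.g. in a match that
 * stores a network address plus mask.  The wrapper name is hypothetical.
 *
 *	static bool my_masked_addr_eq(const union nf_inet_addr *addr,
 *				      const union nf_inet_addr *net,
 *				      const union nf_inet_addr *mask)
 *	{
 *		union nf_inet_addr masked;
 *
 *		nf_inet_addr_mask(addr, &masked, mask);
 *		return nf_inet_addr_cmp(&masked, net);
 *	}
 */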

int netfilter_init(void);

struct nf_hook_state {
	unsigned int hook;
	u_int8_t pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);

struct nf_hook_ops {
	/* User fills in from here down. */
	nf_hookfn		*hook;
	struct net_device	*dev;
	void			*priv;
	u_int8_t		pf;
	unsigned int		hooknum;
	/* Hooks are ordered in ascending priority. */
	int			priority;
};
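
/*
 * Example (illustrative sketch, not part of this header): a minimal hook and
 * its registration.  Hook point and priority constants come from
 * <uapi/linux/netfilter.h> and <linux/netfilter_ipv4.h>; the function and
 * variable names are hypothetical.
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	(or NF_DROP, NF_QUEUE, NF_STOLEN, ...)
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 * Registration is per network namespace, typically from module/pernet init:
 *
 *	err = nf_register_net_hook(net, &my_ops);
 *	...
 *	nf_unregister_net_hook(net, &my_ops);
 */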

struct nf_hook_entry {
	nf_hookfn			*hook;
	void				*priv;
};

struct nf_hook_entries_rcu_head {
	struct rcu_head head;
	void	*allocation;
};

struct nf_hook_entries {
	u16				num_hook_entries;
	/* padding */
	struct nf_hook_entry		hooks[];

	/* trailer: pointers to original orig_ops of each hook,
	 * followed by rcu_head and scratch space used for freeing
	 * the structure via call_rcu.
	 *
	 * This is not part of struct nf_hook_entry since it is only
	 * needed in the slow path (hook register/unregister):
	 * const struct nf_hook_ops     *orig_ops[]
	 *
	 * For the same reason, we store this at the end -- it is
	 * only needed when a hook is deleted, not during
	 * packet path processing:
	 * struct nf_hook_entries_rcu_head     head
	 */
};

static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
	unsigned int n = e->num_hook_entries;
	const void *hook_end;

	hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

	return (struct nf_hook_ops **)hook_end;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
		     struct nf_hook_state *state)
{
	return entry->hook(entry->priv, skb, state);
}
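
/*
 * Conceptually (a simplified sketch, not the actual nf_hook_slow()
 * implementation), the packet path walks hooks[] in order and invokes each
 * entry through nf_hook_entry_hookfn() until some hook returns a verdict
 * other than NF_ACCEPT:
 *
 *	for (i = 0; i < e->num_hook_entries; i++) {
 *		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, state);
 *		if (verdict != NF_ACCEPT)
 *			break;		(drop/queue/stolen handling omitted)
 *	}
 */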

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      unsigned int hook,
				      u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	p->okfn = okfn;
}

struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Function to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
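
/*
 * Example (illustrative only, not part of this header): a protocol module
 * exposing a private getsockopt/setsockopt range.  The option constants and
 * callback names are hypothetical; note the non-inclusive upper bounds.
 *
 *	static struct nf_sockopt_ops my_sockopts = {
 *		.pf		= PF_INET,
 *		.set_optmin	= MY_SO_SET_BASE,
 *		.set_optmax	= MY_SO_SET_LAST + 1,
 *		.set		= my_set_ctl,
 *		.get_optmin	= MY_SO_GET_BASE,
 *		.get_optmax	= MY_SO_GET_LAST + 1,
 *		.get		= my_get_ctl,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = nf_register_sockopt(&my_sockopts);
 *	...
 *	nf_unregister_sockopt(&my_sockopts);
 */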

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int i);

/**
 *	nf_hook - call a netfilter hook
 *
 *	Returns 1 if the hook has allowed the packet to pass.  The function
 *	okfn must be invoked by the caller in this case.  Any other return
 *	value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;
	int ret = 1;

#ifdef HAVE_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
		break;
	case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
		break;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, indev, outdev,
				   sk, net, okfn);

		ret = nf_hook_slow(skb, &state, hook_head, 0);
	}
	rcu_read_unlock();

	return ret;
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}
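
/*
 * Example (illustrative sketch, not part of this header): how a protocol
 * stack typically wraps a hook point around its continuation function.  The
 * IPv4 forwarding path, for instance, runs its FORWARD hook roughly like:
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
 *		       net, NULL, skb, skb->dev, outdev,
 *		       my_forward_finish);
 *
 * If every registered hook accepts the packet, okfn (here the hypothetical
 * my_forward_finish) is called; otherwise the verdict handling inside
 * nf_hook_slow() has already consumed the skb.
 */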

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);

#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
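
/*
 * Example (illustrative only): a target about to mangle the IPv4 header
 * first makes the part it will touch writable (and linear), e.g.:
 *
 *	struct iphdr *iph;
 *
 *	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *		return NF_DROP;
 *	iph = ip_hdr(skb);
 *	... modify iph, then fix up the checksum ...
 */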

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, u_int8_t protocol,
		    unsigned short family);
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, unsigned int len,
			    u_int8_t protocol, unsigned short family);
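
/*
 * Example (illustrative sketch): validating a transport checksum from a hook,
 * roughly the pattern used by code that must reject packets with a bad
 * checksum.  A non-zero return value means the checksum did not verify:
 *
 *	if (nf_checksum(skb, state->hook, ip_hdrlen(skb),
 *			IPPROTO_TCP, AF_INET))
 *		return NF_DROP;
 */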

int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
	     bool strict, unsigned short family);
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);

#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	void (*decodefn)(struct sk_buff *, struct flowi *);

	rcu_read_lock();
	decodefn = rcu_dereference(nf_nat_decode_session_hook);
	if (decodefn)
		decodefn(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
				  enum ip_conntrack_info *ctinfo);
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
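
/*
 * Example (illustrative only, not part of this header): nfnl_ct_hook is an
 * RCU-managed indirection, so callers dereference it under rcu_read_lock()
 * and must tolerate it being NULL (conntrack support not loaded):
 *
 *	struct nfnl_ct_hook *nfnl_ct;
 *	enum ip_conntrack_info ctinfo;
 *	struct nf_conn *ct = NULL;
 *
 *	rcu_read_lock();
 *	nfnl_ct = rcu_dereference(nfnl_ct_hook);
 *	if (nfnl_ct)
 *		ct = nfnl_ct->get_ct(skb, &ctinfo);
 *	rcu_read_unlock();
 */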

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);
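
/*
 * Example (illustrative sketch): the duplication helpers set this per-cpu
 * flag around the transmit of the cloned skb so that the recursive hook
 * traversal can bail out instead of duplicating forever, roughly:
 *
 *	if (this_cpu_read(nf_skb_duplicated))
 *		return;			(already a duplicate, don't loop)
 *	...
 *	this_cpu_write(nf_skb_duplicated, true);
 *	ip_local_out(net, skb->sk, skb);	(traverses OUTPUT/POSTROUTING)
 *	this_cpu_write(nf_skb_duplicated, false);
 */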

#endif /*__LINUX_NETFILTER_H*/