// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */
8 #include <linux/errno.h>
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/netfilter.h>
13 #include <linux/rtnetlink.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/string.h>
18 #include <linux/tc_act/tc_nat.h>
19 #include <net/act_api.h>
20 #include <net/pkt_cls.h>
23 #include <net/netlink.h>
24 #include <net/tc_act/tc_nat.h>
27 #include <net/tc_wrapper.h>
/* Forward declaration: the ops table itself is defined near the bottom of
 * the file, but tcf_nat_init() needs its net_id before that point. */
29 static struct tc_action_ops act_nat_ops
;
/*
 * Netlink attribute policy for this action: TCA_NAT_PARMS must carry a
 * payload of exactly sizeof(struct tc_nat).
 * NOTE(review): the closing "};" of this initializer is missing from this
 * extraction -- confirm against the original file.
 */
31 static const struct nla_policy nat_policy
[TCA_NAT_MAX
+ 1] = {
32 [TCA_NAT_PARMS
] = { .len
= sizeof(struct tc_nat
) },
/*
 * tcf_nat_init - parse netlink attributes and create/update a NAT action.
 *
 * NOTE(review): this extraction is incomplete -- the gaps in the fused
 * original line numbers (39, 40, 41, 52, 57, ...) show that braces, the
 * declarations of "parm"/"index"/"err"/"ret", the error-handling branches
 * and the return statements were lost.  Compare against the original file
 * before relying on the control flow shown here.
 */
35 static int tcf_nat_init(struct net
*net
, struct nlattr
*nla
, struct nlattr
*est
,
36 struct tc_action
**a
, struct tcf_proto
*tp
,
37 u32 flags
, struct netlink_ext_ack
*extack
)
/* Per-netns action table, looked up via this module's net_id. */
39 struct tc_action_net
*tn
= net_generic(net
, act_nat_ops
.net_id
);
40 bool bind
= flags
& TCA_ACT_FLAGS_BIND
;
/* nparm: freshly allocated parameter block; oparm: the one it replaces. */
41 struct tcf_nat_parms
*nparm
, *oparm
;
42 struct nlattr
*tb
[TCA_NAT_MAX
+ 1];
43 struct tcf_chain
*goto_ch
= NULL
;
/* Validate and split the nested attributes against nat_policy. */
52 err
= nla_parse_nested_deprecated(tb
, TCA_NAT_MAX
, nla
, nat_policy
,
/* TCA_NAT_PARMS is mandatory. */
57 if (tb
[TCA_NAT_PARMS
] == NULL
)
59 parm
= nla_data(tb
[TCA_NAT_PARMS
]);
/* Look up an existing action by index, or reserve a new one. */
61 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
63 ret
= tcf_idr_create_from_flags(tn
, index
, est
, a
, &act_nat_ops
,
/* Creation failed: release the reserved index. */
66 tcf_idr_cleanup(tn
, index
);
/* Action exists and the caller did not ask to replace it. */
73 if (!(flags
& TCA_ACT_FLAGS_REPLACE
)) {
74 tcf_idr_release(*a
, bind
);
/* Validate the control action (e.g. goto chain) before committing. */
80 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
84 nparm
= kzalloc(sizeof(*nparm
), GFP_KERNEL
);
/* Copy the user-supplied NAT tuple into the new parameter block. */
90 nparm
->old_addr
= parm
->old_addr
;
91 nparm
->new_addr
= parm
->new_addr
;
92 nparm
->mask
= parm
->mask
;
93 nparm
->flags
= parm
->flags
;
/* Swap in the new parameters under tcf_lock so dump/readers stay
 * consistent; the old block is reclaimed after an RCU grace period. */
97 spin_lock_bh(&p
->tcf_lock
);
98 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
99 oparm
= rcu_replace_pointer(p
->parms
, nparm
, lockdep_is_held(&p
->tcf_lock
));
100 spin_unlock_bh(&p
->tcf_lock
);
/* Drop the chain reference returned by tcf_action_set_ctrlact(), if any. */
103 tcf_chain_put_by_act(goto_ch
);
106 kfree_rcu(oparm
, rcu
);
/* NOTE(review): presumably an error path releasing the action -- the
 * surrounding labels/returns are missing from this extraction. */
110 tcf_idr_release(*a
, bind
);
/*
 * tcf_nat_act - per-packet fast path: rewrite IPv4 addresses statelessly.
 *
 * NOTE(review): this extraction is incomplete -- the declarations of
 * "iph"/"addr"/"mask"/"ihl"/"noff"/"action"/"egress", the switch case
 * labels, the "drop" label and the return statements are missing.  The
 * comments below describe only what the visible fragments establish.
 */
114 TC_INDIRECT_SCOPE
int tcf_nat_act(struct sk_buff
*skb
,
115 const struct tc_action
*a
,
116 struct tcf_result
*res
)
118 struct tcf_nat
*p
= to_tcf_nat(a
);
119 struct tcf_nat_parms
*parms
;
/* Bump last-use timestamp and byte/packet stats for this action. */
130 tcf_lastuse_update(&p
->tcf_tm
);
131 tcf_action_update_bstats(&p
->common
, skb
);
133 action
= READ_ONCE(p
->tcf_action
);
/* Lockless read of the parameter block; callers hold rcu_read_lock_bh. */
135 parms
= rcu_dereference_bh(p
->parms
);
136 old_addr
= parms
->old_addr
;
137 new_addr
= parms
->new_addr
;
/* egress: rewrite the source address; otherwise the destination. */
139 egress
= parms
->flags
& TCA_NAT_FLAG_EGRESS
;
141 if (unlikely(action
== TC_ACT_SHOT
))
144 noff
= skb_network_offset(skb
);
/* Make sure at least the IPv4 header is in the linear area. */
145 if (!pskb_may_pull(skb
, sizeof(*iph
) + noff
))
/* Address matches old_addr under the mask: perform the rewrite. */
155 if (!((old_addr
^ addr
) & mask
)) {
156 if (skb_try_make_writable(skb
, sizeof(*iph
) + noff
))
/* Keep the host bits outside the mask from the original address. */
160 new_addr
|= addr
& ~mask
;
162 /* Rewrite IP header */
165 iph
->saddr
= new_addr
;
167 iph
->daddr
= new_addr
;
/* Fix the IP header checksum incrementally for the address change. */
169 csum_replace4(&iph
->check
, addr
, new_addr
);
/* Non-matching packet: only ICMP errors on first fragments may still
 * need inner-header rewriting below; everything else passes through. */
170 } else if ((iph
->frag_off
& htons(IP_OFFSET
)) ||
171 iph
->protocol
!= IPPROTO_ICMP
) {
177 /* It would be nice to share code with stateful NAT. */
/* Only first fragments carry the L4 header, hence the frag_off test. */
178 switch (iph
->frag_off
& htons(IP_OFFSET
) ? 0 : iph
->protocol
) {
/* TCP: adjust the transport checksum for the address change. */
183 if (!pskb_may_pull(skb
, ihl
+ sizeof(*tcph
) + noff
) ||
184 skb_try_make_writable(skb
, ihl
+ sizeof(*tcph
) + noff
))
187 tcph
= (void *)(skb_network_header(skb
) + ihl
);
188 inet_proto_csum_replace4(&tcph
->check
, skb
, addr
, new_addr
,
/* UDP: checksum is optional; only touch it when present (non-zero)
 * or when the checksum is still to be finished (CHECKSUM_PARTIAL). */
196 if (!pskb_may_pull(skb
, ihl
+ sizeof(*udph
) + noff
) ||
197 skb_try_make_writable(skb
, ihl
+ sizeof(*udph
) + noff
))
200 udph
= (void *)(skb_network_header(skb
) + ihl
);
201 if (udph
->check
|| skb
->ip_summed
== CHECKSUM_PARTIAL
) {
202 inet_proto_csum_replace4(&udph
->check
, skb
, addr
,
/* A computed UDP checksum of 0 must be sent as 0xFFFF. */
205 udph
->check
= CSUM_MANGLED_0
;
/* ICMP errors embed the offending IP header; rewrite that too. */
211 struct icmphdr
*icmph
;
213 if (!pskb_may_pull(skb
, ihl
+ sizeof(*icmph
) + noff
))
216 icmph
= (void *)(skb_network_header(skb
) + ihl
);
/* Only ICMP error types carry an embedded header worth fixing. */
218 if (!icmp_is_err(icmph
->type
))
221 if (!pskb_may_pull(skb
, ihl
+ sizeof(*icmph
) + sizeof(*iph
) +
/* Re-derive the pointers: pskb_may_pull may have moved the data. */
225 icmph
= (void *)(skb_network_header(skb
) + ihl
);
226 iph
= (void *)(icmph
+ 1);
/* Inner address does not match the NAT tuple: nothing to rewrite. */
232 if ((old_addr
^ addr
) & mask
)
235 if (skb_try_make_writable(skb
, ihl
+ sizeof(*icmph
) +
236 sizeof(*iph
) + noff
))
/* Pointers again re-derived after making the area writable. */
239 icmph
= (void *)(skb_network_header(skb
) + ihl
);
240 iph
= (void *)(icmph
+ 1);
243 new_addr
|= addr
& ~mask
;
245 /* XXX Fix up the inner checksums. */
/* Note: direction is inverted for the embedded header -- an egress
 * rewrite touches the inner daddr, ingress the inner saddr. */
247 iph
->daddr
= new_addr
;
249 iph
->saddr
= new_addr
;
251 inet_proto_csum_replace4(&icmph
->checksum
, skb
, addr
, new_addr
,
/* NOTE(review): presumably the "drop" path -- count the dropped packet.
 * The surrounding label and return are missing from this extraction. */
263 tcf_action_inc_drop_qstats(&p
->common
);
/*
 * tcf_nat_dump - serialize the action's parameters back to userspace.
 *
 * NOTE(review): this extraction is incomplete -- the "bind"/"ref"
 * parameters in the signature, the tcf_t "t" declaration, the returns and
 * the nla_put_failure label body (nlmsg_trim) are missing.
 */
267 static int tcf_nat_dump(struct sk_buff
*skb
, struct tc_action
*a
,
/* Remember the tail so a failed dump can be trimmed back (see label). */
270 unsigned char *b
= skb_tail_pointer(skb
);
271 struct tcf_nat
*p
= to_tcf_nat(a
);
/* refcnt/bindcnt are reported net of the caller's own references. */
272 struct tc_nat opt
= {
273 .index
= p
->tcf_index
,
274 .refcnt
= refcount_read(&p
->tcf_refcnt
) - ref
,
275 .bindcnt
= atomic_read(&p
->tcf_bindcnt
) - bind
,
277 struct tcf_nat_parms
*parms
;
/* tcf_lock keeps action/parms consistent against tcf_nat_init(). */
280 spin_lock_bh(&p
->tcf_lock
);
282 opt
.action
= p
->tcf_action
;
284 parms
= rcu_dereference_protected(p
->parms
, lockdep_is_held(&p
->tcf_lock
));
286 opt
.old_addr
= parms
->old_addr
;
287 opt
.new_addr
= parms
->new_addr
;
288 opt
.mask
= parms
->mask
;
289 opt
.flags
= parms
->flags
;
291 if (nla_put(skb
, TCA_NAT_PARMS
, sizeof(opt
), &opt
))
292 goto nla_put_failure
;
/* Emit install/lastuse/expires timestamps. */
294 tcf_tm_dump(&t
, &p
->tcf_tm
);
295 if (nla_put_64bit(skb
, TCA_NAT_TM
, sizeof(t
), &t
, TCA_NAT_PAD
))
296 goto nla_put_failure
;
297 spin_unlock_bh(&p
->tcf_lock
);
/* NOTE(review): presumably the nla_put_failure path -- unlock and trim
 * the skb back to "b"; the label and trim call are missing here. */
302 spin_unlock_bh(&p
->tcf_lock
);
/*
 * tcf_nat_cleanup - release the parameter block when the action dies.
 * The "1" argument asserts exclusive access: no readers remain, so the
 * protected dereference needs no lock.  Freeing still goes through RCU.
 * NOTE(review): the function's braces are missing from this extraction.
 */
307 static void tcf_nat_cleanup(struct tc_action
*a
)
309 struct tcf_nat
*p
= to_tcf_nat(a
);
310 struct tcf_nat_parms
*parms
;
312 parms
= rcu_dereference_protected(p
->parms
, 1);
314 kfree_rcu(parms
, rcu
);
/*
 * Action ops table wiring this module's callbacks into the tc core.
 * NOTE(review): the .kind/.id/.act members and the closing "};" are
 * missing from this extraction -- confirm against the original file.
 */
317 static struct tc_action_ops act_nat_ops
= {
320 .owner
= THIS_MODULE
,
322 .dump
= tcf_nat_dump
,
323 .init
= tcf_nat_init
,
324 .cleanup
= tcf_nat_cleanup
,
325 .size
= sizeof(struct tcf_nat
),
/* Allow autoloading this module by action name "nat". */
327 MODULE_ALIAS_NET_ACT("nat");
/*
 * nat_init_net - per-netns setup: initialize this action's IDR table.
 * NOTE(review): the function's braces are missing from this extraction.
 */
329 static __net_init
int nat_init_net(struct net
*net
)
331 struct tc_action_net
*tn
= net_generic(net
, act_nat_ops
.net_id
);
333 return tc_action_net_init(net
, tn
, &act_nat_ops
);
/*
 * nat_exit_net - batched per-netns teardown of the action table.
 * NOTE(review): the function's braces are missing from this extraction.
 */
336 static void __net_exit
nat_exit_net(struct list_head
*net_list
)
338 tc_action_net_exit(net_list
, act_nat_ops
.net_id
);
/*
 * Pernet registration: .id points at act_nat_ops.net_id so the core
 * allocates a per-netns tc_action_net slot of the given .size.
 * NOTE(review): the closing "};" is missing from this extraction.
 */
341 static struct pernet_operations nat_net_ops
= {
342 .init
= nat_init_net
,
343 .exit_batch
= nat_exit_net
,
344 .id
= &act_nat_ops
.net_id
,
345 .size
= sizeof(struct tc_action_net
),
348 MODULE_DESCRIPTION("Stateless NAT actions");
349 MODULE_LICENSE("GPL");
/*
 * nat_init_module - register the action ops and pernet ops with tc.
 * NOTE(review): the function's braces are missing from this extraction.
 */
351 static int __init
nat_init_module(void)
353 return tcf_register_action(&act_nat_ops
, &nat_net_ops
);
/*
 * nat_cleanup_module - unregister everything nat_init_module registered.
 * NOTE(review): the function's braces are missing from this extraction.
 */
356 static void __exit
nat_cleanup_module(void)
358 tcf_unregister_action(&act_nat_ops
, &nat_net_ops
);
361 module_init(nat_init_module
);
362 module_exit(nat_cleanup_module
);