1 // SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/string.h>
13 #include <linux/errno.h>
14 #include <linux/skbuff.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <net/netlink.h>
20 #include <net/pkt_sched.h>
21 #include <linux/tc_act/tc_ipt.h>
22 #include <net/tc_act/tc_ipt.h>
24 #include <linux/netfilter_ipv4/ip_tables.h>
/* Per-netns generic-storage slot ids and the action ops for the two
 * flavours of this action ("ipt" and "xt"); the ops are defined below.
 */
static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;
33 static int ipt_init_target(struct net
*net
, struct xt_entry_target
*t
,
34 char *table
, unsigned int hook
)
36 struct xt_tgchk_param par
;
37 struct xt_target
*target
;
38 struct ipt_entry e
= {};
41 target
= xt_request_find_target(AF_INET
, t
->u
.user
.name
,
44 return PTR_ERR(target
);
46 t
->u
.kernel
.target
= target
;
47 memset(&par
, 0, sizeof(par
));
52 par
.targinfo
= t
->data
;
54 par
.family
= NFPROTO_IPV4
;
56 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
), 0, false);
58 module_put(t
->u
.kernel
.target
->me
);
64 static void ipt_destroy_target(struct xt_entry_target
*t
, struct net
*net
)
66 struct xt_tgdtor_param par
= {
67 .target
= t
->u
.kernel
.target
,
69 .family
= NFPROTO_IPV4
,
72 if (par
.target
->destroy
!= NULL
)
73 par
.target
->destroy(&par
);
74 module_put(par
.target
->me
);
77 static void tcf_ipt_release(struct tc_action
*a
)
79 struct tcf_ipt
*ipt
= to_ipt(a
);
82 ipt_destroy_target(ipt
->tcfi_t
, a
->idrinfo
->net
);
85 kfree(ipt
->tcfi_tname
);
88 static const struct nla_policy ipt_policy
[TCA_IPT_MAX
+ 1] = {
89 [TCA_IPT_TABLE
] = { .type
= NLA_STRING
, .len
= IFNAMSIZ
},
90 [TCA_IPT_HOOK
] = { .type
= NLA_U32
},
91 [TCA_IPT_INDEX
] = { .type
= NLA_U32
},
92 [TCA_IPT_TARG
] = { .len
= sizeof(struct xt_entry_target
) },
95 static int __tcf_ipt_init(struct net
*net
, unsigned int id
, struct nlattr
*nla
,
96 struct nlattr
*est
, struct tc_action
**a
,
97 const struct tc_action_ops
*ops
, int ovr
, int bind
,
98 struct tcf_proto
*tp
, u32 flags
)
100 struct tc_action_net
*tn
= net_generic(net
, id
);
101 struct nlattr
*tb
[TCA_IPT_MAX
+ 1];
103 struct xt_entry_target
*td
, *t
;
113 err
= nla_parse_nested_deprecated(tb
, TCA_IPT_MAX
, nla
, ipt_policy
,
118 if (tb
[TCA_IPT_INDEX
] != NULL
)
119 index
= nla_get_u32(tb
[TCA_IPT_INDEX
]);
121 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
128 if (tb
[TCA_IPT_HOOK
] == NULL
|| tb
[TCA_IPT_TARG
] == NULL
) {
130 tcf_idr_release(*a
, bind
);
132 tcf_idr_cleanup(tn
, index
);
136 td
= (struct xt_entry_target
*)nla_data(tb
[TCA_IPT_TARG
]);
137 if (nla_len(tb
[TCA_IPT_TARG
]) != td
->u
.target_size
) {
139 tcf_idr_release(*a
, bind
);
141 tcf_idr_cleanup(tn
, index
);
146 ret
= tcf_idr_create(tn
, index
, est
, a
, ops
, bind
,
149 tcf_idr_cleanup(tn
, index
);
154 if (bind
)/* dont override defaults */
158 tcf_idr_release(*a
, bind
);
162 hook
= nla_get_u32(tb
[TCA_IPT_HOOK
]);
165 tname
= kmalloc(IFNAMSIZ
, GFP_KERNEL
);
166 if (unlikely(!tname
))
168 if (tb
[TCA_IPT_TABLE
] == NULL
||
169 nla_strlcpy(tname
, tb
[TCA_IPT_TABLE
], IFNAMSIZ
) >= IFNAMSIZ
)
170 strcpy(tname
, "mangle");
172 t
= kmemdup(td
, td
->u
.target_size
, GFP_KERNEL
);
176 err
= ipt_init_target(net
, t
, tname
, hook
);
182 spin_lock_bh(&ipt
->tcf_lock
);
183 if (ret
!= ACT_P_CREATED
) {
184 ipt_destroy_target(ipt
->tcfi_t
, net
);
185 kfree(ipt
->tcfi_tname
);
188 ipt
->tcfi_tname
= tname
;
190 ipt
->tcfi_hook
= hook
;
191 spin_unlock_bh(&ipt
->tcf_lock
);
192 if (ret
== ACT_P_CREATED
)
193 tcf_idr_insert(tn
, *a
);
201 tcf_idr_release(*a
, bind
);
205 static int tcf_ipt_init(struct net
*net
, struct nlattr
*nla
,
206 struct nlattr
*est
, struct tc_action
**a
, int ovr
,
207 int bind
, bool rtnl_held
, struct tcf_proto
*tp
,
208 u32 flags
, struct netlink_ext_ack
*extack
)
210 return __tcf_ipt_init(net
, ipt_net_id
, nla
, est
, a
, &act_ipt_ops
, ovr
,
214 static int tcf_xt_init(struct net
*net
, struct nlattr
*nla
,
215 struct nlattr
*est
, struct tc_action
**a
, int ovr
,
216 int bind
, bool unlocked
, struct tcf_proto
*tp
,
217 u32 flags
, struct netlink_ext_ack
*extack
)
219 return __tcf_ipt_init(net
, xt_net_id
, nla
, est
, a
, &act_xt_ops
, ovr
,
223 static int tcf_ipt_act(struct sk_buff
*skb
, const struct tc_action
*a
,
224 struct tcf_result
*res
)
226 int ret
= 0, result
= 0;
227 struct tcf_ipt
*ipt
= to_ipt(a
);
228 struct xt_action_param par
;
229 struct nf_hook_state state
= {
230 .net
= dev_net(skb
->dev
),
232 .hook
= ipt
->tcfi_hook
,
236 if (skb_unclone(skb
, GFP_ATOMIC
))
237 return TC_ACT_UNSPEC
;
239 spin_lock(&ipt
->tcf_lock
);
241 tcf_lastuse_update(&ipt
->tcf_tm
);
242 bstats_update(&ipt
->tcf_bstats
, skb
);
244 /* yes, we have to worry about both in and out dev
245 * worry later - danger - this API seems to have changed
246 * from earlier kernels
249 par
.target
= ipt
->tcfi_t
->u
.kernel
.target
;
250 par
.targinfo
= ipt
->tcfi_t
->data
;
251 ret
= par
.target
->target(skb
, &par
);
258 result
= TC_ACT_SHOT
;
259 ipt
->tcf_qstats
.drops
++;
262 result
= TC_ACT_PIPE
;
265 net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
270 spin_unlock(&ipt
->tcf_lock
);
275 static int tcf_ipt_dump(struct sk_buff
*skb
, struct tc_action
*a
, int bind
,
278 unsigned char *b
= skb_tail_pointer(skb
);
279 struct tcf_ipt
*ipt
= to_ipt(a
);
280 struct xt_entry_target
*t
;
284 /* for simple targets kernel size == user size
285 * user name = target name
286 * for foolproof you need to not assume this
289 spin_lock_bh(&ipt
->tcf_lock
);
290 t
= kmemdup(ipt
->tcfi_t
, ipt
->tcfi_t
->u
.user
.target_size
, GFP_ATOMIC
);
292 goto nla_put_failure
;
294 c
.bindcnt
= atomic_read(&ipt
->tcf_bindcnt
) - bind
;
295 c
.refcnt
= refcount_read(&ipt
->tcf_refcnt
) - ref
;
296 strcpy(t
->u
.user
.name
, ipt
->tcfi_t
->u
.kernel
.target
->name
);
298 if (nla_put(skb
, TCA_IPT_TARG
, ipt
->tcfi_t
->u
.user
.target_size
, t
) ||
299 nla_put_u32(skb
, TCA_IPT_INDEX
, ipt
->tcf_index
) ||
300 nla_put_u32(skb
, TCA_IPT_HOOK
, ipt
->tcfi_hook
) ||
301 nla_put(skb
, TCA_IPT_CNT
, sizeof(struct tc_cnt
), &c
) ||
302 nla_put_string(skb
, TCA_IPT_TABLE
, ipt
->tcfi_tname
))
303 goto nla_put_failure
;
305 tcf_tm_dump(&tm
, &ipt
->tcf_tm
);
306 if (nla_put_64bit(skb
, TCA_IPT_TM
, sizeof(tm
), &tm
, TCA_IPT_PAD
))
307 goto nla_put_failure
;
309 spin_unlock_bh(&ipt
->tcf_lock
);
314 spin_unlock_bh(&ipt
->tcf_lock
);
320 static int tcf_ipt_walker(struct net
*net
, struct sk_buff
*skb
,
321 struct netlink_callback
*cb
, int type
,
322 const struct tc_action_ops
*ops
,
323 struct netlink_ext_ack
*extack
)
325 struct tc_action_net
*tn
= net_generic(net
, ipt_net_id
);
327 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
, extack
);
330 static int tcf_ipt_search(struct net
*net
, struct tc_action
**a
, u32 index
)
332 struct tc_action_net
*tn
= net_generic(net
, ipt_net_id
);
334 return tcf_idr_search(tn
, a
, index
);
337 static struct tc_action_ops act_ipt_ops
= {
340 .owner
= THIS_MODULE
,
342 .dump
= tcf_ipt_dump
,
343 .cleanup
= tcf_ipt_release
,
344 .init
= tcf_ipt_init
,
345 .walk
= tcf_ipt_walker
,
346 .lookup
= tcf_ipt_search
,
347 .size
= sizeof(struct tcf_ipt
),
350 static __net_init
int ipt_init_net(struct net
*net
)
352 struct tc_action_net
*tn
= net_generic(net
, ipt_net_id
);
354 return tc_action_net_init(net
, tn
, &act_ipt_ops
);
357 static void __net_exit
ipt_exit_net(struct list_head
*net_list
)
359 tc_action_net_exit(net_list
, ipt_net_id
);
362 static struct pernet_operations ipt_net_ops
= {
363 .init
= ipt_init_net
,
364 .exit_batch
= ipt_exit_net
,
366 .size
= sizeof(struct tc_action_net
),
369 static int tcf_xt_walker(struct net
*net
, struct sk_buff
*skb
,
370 struct netlink_callback
*cb
, int type
,
371 const struct tc_action_ops
*ops
,
372 struct netlink_ext_ack
*extack
)
374 struct tc_action_net
*tn
= net_generic(net
, xt_net_id
);
376 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
, extack
);
379 static int tcf_xt_search(struct net
*net
, struct tc_action
**a
, u32 index
)
381 struct tc_action_net
*tn
= net_generic(net
, xt_net_id
);
383 return tcf_idr_search(tn
, a
, index
);
386 static struct tc_action_ops act_xt_ops
= {
389 .owner
= THIS_MODULE
,
391 .dump
= tcf_ipt_dump
,
392 .cleanup
= tcf_ipt_release
,
394 .walk
= tcf_xt_walker
,
395 .lookup
= tcf_xt_search
,
396 .size
= sizeof(struct tcf_ipt
),
399 static __net_init
int xt_init_net(struct net
*net
)
401 struct tc_action_net
*tn
= net_generic(net
, xt_net_id
);
403 return tc_action_net_init(net
, tn
, &act_xt_ops
);
406 static void __net_exit
xt_exit_net(struct list_head
*net_list
)
408 tc_action_net_exit(net_list
, xt_net_id
);
411 static struct pernet_operations xt_net_ops
= {
413 .exit_batch
= xt_exit_net
,
415 .size
= sizeof(struct tc_action_net
),
/* Module metadata; the "act_xt" alias lets the "xt" flavour autoload
 * this module too.
 */
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");
423 static int __init
ipt_init_module(void)
427 ret1
= tcf_register_action(&act_xt_ops
, &xt_net_ops
);
429 pr_err("Failed to load xt action\n");
431 ret2
= tcf_register_action(&act_ipt_ops
, &ipt_net_ops
);
433 pr_err("Failed to load ipt action\n");
435 if (ret1
< 0 && ret2
< 0) {
441 static void __exit
ipt_cleanup_module(void)
443 tcf_unregister_action(&act_ipt_ops
, &ipt_net_ops
);
444 tcf_unregister_action(&act_xt_ops
, &xt_net_ops
);
/* Wire the load/unload handlers into the module infrastructure. */
module_init(ipt_init_module);
module_exit(ipt_cleanup_module);