// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>

static unsigned int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static unsigned int xt_net_id;
static struct tc_action_ops act_xt_ops;

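/* Resolve the requested ipv4 xtables target by name/revision (taking a
 * module reference) and run its checkentry hook against a dummy ipt_entry,
 * so the target can be used outside of a real iptables rule.
 */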
static int ipt_init_target(struct net *net, struct xt_entry_target *t,
			   char *table, unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.net       = net;
	par.table     = table;
	par.entryinfo = &e;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

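/* Invoke the target's destructor (if any) and drop the module reference
 * taken when the target was resolved in ipt_init_target().
 */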
static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
		.net      = net,
	};

	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

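/* Action cleanup: tear down the xt target and free the table name. */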
static void tcf_ipt_release(struct tc_action *a)
{
	struct tcf_ipt *ipt = to_ipt(a);

	if (ipt->tcfi_t) {
		ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
		kfree(ipt->tcfi_t);
	}
	kfree(ipt->tcfi_tname);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

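/* Common netlink setup path shared by the "ipt" and "xt" actions: parse
 * the TCA_IPT_* attributes, create or look up the action index, duplicate
 * the user-supplied target blob and bind it to the requested table/hook.
 */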
static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind,
			  struct tcf_proto *tp, u32 flags)
{
	struct tc_action_net *tn = net_generic(net, id);
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, ops, bind,
				     false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		if (bind)/* don't override defaults */
			return 0;

		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(net, t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t, net);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	tcf_idr_release(*a, bind);
	return err;
}

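/* Thin wrappers: "ipt" and "xt" are the same action registered twice,
 * differing only in their per-netns id and ops table.
 */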
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind, bool rtnl_held, struct tcf_proto *tp,
			u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
			      bind, tp, flags);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind, bool unlocked, struct tcf_proto *tp,
		       u32 flags, struct netlink_ext_ack *extack)
{
	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
			      bind, tp, flags);
}

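/* Datapath: run the configured xt target on the skb and map the netfilter
 * verdict (NF_ACCEPT/NF_DROP/XT_CONTINUE) onto a tc action result.
 */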
static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;
	struct nf_hook_state state = {
		.net	= dev_net(skb->dev),
		.in	= skb->dev,
		.hook	= ipt->tcfi_hook,
		.pf	= NFPROTO_IPV4,
	};

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.state    = &state;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);

	return result;
}

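/* Dump the action configuration back to user space. The target blob is
 * duplicated under the action lock so the kernel-side target name can be
 * restored to its user-visible form before it is copied out.
 */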
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * to be foolproof, do not assume this
	 */

	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

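/* Boilerplate per-netns walker/lookup plumbing, duplicated for the two
 * registrations since each keeps its own net_generic id.
 */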
static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ipt_ops = {
	.kind		=	"ipt",
	.id		=	TCA_ID_IPT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt_act,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_ipt_init,
	.walk		=	tcf_ipt_walker,
	.lookup		=	tcf_ipt_search,
	.size		=	sizeof(struct tcf_ipt),
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(net, tn, &act_ipt_ops);
}

static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind		=	"xt",
	.id		=	TCA_ID_XT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt_act,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_xt_init,
	.walk		=	tcf_xt_walker,
	.lookup		=	tcf_xt_search,
	.size		=	sizeof(struct tcf_ipt),
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(net, tn, &act_xt_ops);
}

static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

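/* Register both flavours; the module loads if at least one succeeds.
 *
 * A typical user-space invocation (sketch; exact iproute2 syntax may vary):
 *   tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *      action ipt -j MARK --set-mark 2
 */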
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0) {
		return ret1;
	} else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);