/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>
#define IPT_TAB_MASK     15

static int ipt_net_id;
static struct tc_action_ops act_ipt_ops;

static int xt_net_id;
static struct tc_action_ops act_xt_ops;
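/* Look up the requested iptables/xtables target by name and validate the
 * user-supplied target data with xt_check_target(). On validation failure
 * the module reference taken by xt_request_find_target() is dropped again.
 */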
static int ipt_init_target(struct xt_entry_target *t, char *table,
			   unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	struct ipt_entry e = {};
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	memset(&par, 0, sizeof(par));
	par.table     = table;
	par.entryinfo = &e;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}
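/* Undo ipt_init_target(): run the target's destructor, if it has one,
 * and release the module reference.
 */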
static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
	};
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
static void tcf_ipt_release(struct tc_action *a, int bind)
{
	struct tcf_ipt *ipt = to_ipt(a);

	ipt_destroy_target(ipt->tcfi_t);
	kfree(ipt->tcfi_tname);
	kfree(ipt->tcfi_t);
}
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};
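/* Shared init path for the "ipt" and "xt" actions: parse and validate the
 * netlink attributes, create or update the action instance in the given
 * per-netns table, and bind the requested iptables target to it.
 */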
static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  const struct tc_action_ops *ops, int ovr, int bind)
{
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	bool exists = false;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	exists = tcf_hash_check(tn, index, a, bind);
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_hash_release(*a, bind);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
		if (exists)
			tcf_hash_release(*a, bind);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_hash_create(tn, index, est, a, ops, bind,
				      false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* dont override defaults */
			return 0;
		tcf_hash_release(*a, bind);

		if (!ovr)
			return -EEXIST;
	}
	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	if (err < 0)
		goto err3;

	ipt = to_ipt(*a);

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	if (ret == ACT_P_CREATED)
		tcf_hash_cleanup(*a, est);
	return err;
}
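/* Thin wrappers: "ipt" and "xt" share one implementation and differ only
 * in the per-netns table the action instances live in.
 */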
static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a, int ovr,
			int bind)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a, int ovr,
		       int bind)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
}
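/* The action itself: run the configured iptables target on the skb under
 * the action lock and map the netfilter verdict to a TC result
 * (NF_DROP -> TC_ACT_SHOT, XT_CONTINUE -> TC_ACT_PIPE, anything else is
 * treated as accept).
 */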
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_action_param par;

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* yes, we have to worry about both in and out dev
	 * worry later - danger - this API seems to have changed
	 * from earlier kernels
	 */
	par.net	     = dev_net(skb->dev);
	par.in       = skb->dev;
	par.out      = NULL;
	par.hooknum  = ipt->tcfi_hook;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	par.family   = NFPROTO_IPV4;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}
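/* Dump the action configuration (target blob, index, hook, counters,
 * table name and timestamps) back to user space.
 */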
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * for foolproof you need to not assume this
	 */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}
static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_hash_search(tn, a, index);
}
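/* Ops table for the "ipt" flavour; the "xt" flavour below reuses the same
 * act/dump/cleanup callbacks and only swaps the init/walk/lookup hooks so
 * that it operates on its own per-netns table.
 */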
static struct tc_action_ops act_ipt_ops = {
	.kind		=	"ipt",
	.type		=	TCA_ACT_IPT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_ipt_init,
	.walk		=	tcf_ipt_walker,
	.lookup		=	tcf_ipt_search,
	.size		=	sizeof(struct tcf_ipt),
};
static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(tn, &act_ipt_ops, IPT_TAB_MASK);
}

static void __net_exit ipt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};
static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_hash_search(tn, a, index);
}
static struct tc_action_ops act_xt_ops = {
	.kind		=	"xt",
	.type		=	TCA_ACT_XT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_xt_init,
	.walk		=	tcf_xt_walker,
	.lookup		=	tcf_xt_search,
	.size		=	sizeof(struct tcf_ipt),
};
static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(tn, &act_xt_ops, IPT_TAB_MASK);
}

static void __net_exit xt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");
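/* Rough usage sketch from user space via iproute2's tc (exact option
 * syntax depends on the iproute2 version; the xtables options following
 * -j are passed through to libxtables):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *       match ip dst 192.0.2.0/24 flowid 1:1 \
 *       action xt -j MARK --set-mark 0x1
 */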
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;
	else
		return 0;
}
static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}
module_init(ipt_init_module);
module_exit(ipt_cleanup_module);