// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 */
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
23 #include <net/sch_generic.h>
29 struct fw_filter __rcu
*ht
[HTSIZE
];
34 struct fw_filter __rcu
*next
;
36 struct tcf_result res
;
40 struct rcu_work rwork
;
43 static u32
fw_hash(u32 handle
)
45 handle
^= (handle
>> 16);
46 handle
^= (handle
>> 8);
47 return handle
% HTSIZE
;
50 static int fw_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
51 struct tcf_result
*res
)
53 struct fw_head
*head
= rcu_dereference_bh(tp
->root
);
61 for (f
= rcu_dereference_bh(head
->ht
[fw_hash(id
)]); f
;
62 f
= rcu_dereference_bh(f
->next
)) {
65 if (!tcf_match_indev(skb
, f
->ifindex
))
67 r
= tcf_exts_exec(skb
, &f
->exts
, res
);
75 struct Qdisc
*q
= tcf_block_q(tp
->chain
->block
);
77 /* Old method: classify the packet using its skb mark. */
78 if (id
&& (TC_H_MAJ(id
) == 0 ||
79 !(TC_H_MAJ(id
^ q
->handle
)))) {
89 static void *fw_get(struct tcf_proto
*tp
, u32 handle
)
91 struct fw_head
*head
= rtnl_dereference(tp
->root
);
97 f
= rtnl_dereference(head
->ht
[fw_hash(handle
)]);
98 for (; f
; f
= rtnl_dereference(f
->next
)) {
/* Classifier init hook: nothing to do up front. */
static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}
113 static void __fw_delete_filter(struct fw_filter
*f
)
115 tcf_exts_destroy(&f
->exts
);
116 tcf_exts_put_net(&f
->exts
);
120 static void fw_delete_filter_work(struct work_struct
*work
)
122 struct fw_filter
*f
= container_of(to_rcu_work(work
),
126 __fw_delete_filter(f
);
130 static void fw_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
131 struct netlink_ext_ack
*extack
)
133 struct fw_head
*head
= rtnl_dereference(tp
->root
);
140 for (h
= 0; h
< HTSIZE
; h
++) {
141 while ((f
= rtnl_dereference(head
->ht
[h
])) != NULL
) {
142 RCU_INIT_POINTER(head
->ht
[h
],
143 rtnl_dereference(f
->next
));
144 tcf_unbind_filter(tp
, &f
->res
);
145 if (tcf_exts_get_net(&f
->exts
))
146 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
148 __fw_delete_filter(f
);
151 kfree_rcu(head
, rcu
);
154 static int fw_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
155 bool rtnl_held
, struct netlink_ext_ack
*extack
)
157 struct fw_head
*head
= rtnl_dereference(tp
->root
);
158 struct fw_filter
*f
= arg
;
159 struct fw_filter __rcu
**fp
;
160 struct fw_filter
*pfp
;
164 if (head
== NULL
|| f
== NULL
)
167 fp
= &head
->ht
[fw_hash(f
->id
)];
169 for (pfp
= rtnl_dereference(*fp
); pfp
;
170 fp
= &pfp
->next
, pfp
= rtnl_dereference(*fp
)) {
172 RCU_INIT_POINTER(*fp
, rtnl_dereference(f
->next
));
173 tcf_unbind_filter(tp
, &f
->res
);
174 tcf_exts_get_net(&f
->exts
);
175 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
182 for (h
= 0; h
< HTSIZE
; h
++) {
183 if (rcu_access_pointer(head
->ht
[h
])) {
193 static const struct nla_policy fw_policy
[TCA_FW_MAX
+ 1] = {
194 [TCA_FW_CLASSID
] = { .type
= NLA_U32
},
195 [TCA_FW_INDEV
] = { .type
= NLA_STRING
, .len
= IFNAMSIZ
},
196 [TCA_FW_MASK
] = { .type
= NLA_U32
},
199 static int fw_set_parms(struct net
*net
, struct tcf_proto
*tp
,
200 struct fw_filter
*f
, struct nlattr
**tb
,
201 struct nlattr
**tca
, unsigned long base
, bool ovr
,
202 struct netlink_ext_ack
*extack
)
204 struct fw_head
*head
= rtnl_dereference(tp
->root
);
208 err
= tcf_exts_validate(net
, tp
, tb
, tca
[TCA_RATE
], &f
->exts
, ovr
,
213 if (tb
[TCA_FW_CLASSID
]) {
214 f
->res
.classid
= nla_get_u32(tb
[TCA_FW_CLASSID
]);
215 tcf_bind_filter(tp
, &f
->res
, base
);
218 if (tb
[TCA_FW_INDEV
]) {
220 ret
= tcf_change_indev(net
, tb
[TCA_FW_INDEV
], extack
);
227 if (tb
[TCA_FW_MASK
]) {
228 mask
= nla_get_u32(tb
[TCA_FW_MASK
]);
229 if (mask
!= head
->mask
)
231 } else if (head
->mask
!= 0xFFFFFFFF)
237 static int fw_change(struct net
*net
, struct sk_buff
*in_skb
,
238 struct tcf_proto
*tp
, unsigned long base
,
239 u32 handle
, struct nlattr
**tca
, void **arg
,
240 bool ovr
, bool rtnl_held
,
241 struct netlink_ext_ack
*extack
)
243 struct fw_head
*head
= rtnl_dereference(tp
->root
);
244 struct fw_filter
*f
= *arg
;
245 struct nlattr
*opt
= tca
[TCA_OPTIONS
];
246 struct nlattr
*tb
[TCA_FW_MAX
+ 1];
250 return handle
? -EINVAL
: 0; /* Succeed if it is old method. */
252 err
= nla_parse_nested_deprecated(tb
, TCA_FW_MAX
, opt
, fw_policy
,
258 struct fw_filter
*pfp
, *fnew
;
259 struct fw_filter __rcu
**fp
;
261 if (f
->id
!= handle
&& handle
)
264 fnew
= kzalloc(sizeof(struct fw_filter
), GFP_KERNEL
);
270 fnew
->ifindex
= f
->ifindex
;
273 err
= tcf_exts_init(&fnew
->exts
, net
, TCA_FW_ACT
,
280 err
= fw_set_parms(net
, tp
, fnew
, tb
, tca
, base
, ovr
, extack
);
282 tcf_exts_destroy(&fnew
->exts
);
287 fp
= &head
->ht
[fw_hash(fnew
->id
)];
288 for (pfp
= rtnl_dereference(*fp
); pfp
;
289 fp
= &pfp
->next
, pfp
= rtnl_dereference(*fp
))
293 RCU_INIT_POINTER(fnew
->next
, rtnl_dereference(pfp
->next
));
294 rcu_assign_pointer(*fp
, fnew
);
295 tcf_unbind_filter(tp
, &f
->res
);
296 tcf_exts_get_net(&f
->exts
);
297 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
307 u32 mask
= 0xFFFFFFFF;
309 mask
= nla_get_u32(tb
[TCA_FW_MASK
]);
311 head
= kzalloc(sizeof(*head
), GFP_KERNEL
);
316 rcu_assign_pointer(tp
->root
, head
);
319 f
= kzalloc(sizeof(struct fw_filter
), GFP_KERNEL
);
323 err
= tcf_exts_init(&f
->exts
, net
, TCA_FW_ACT
, TCA_FW_POLICE
);
329 err
= fw_set_parms(net
, tp
, f
, tb
, tca
, base
, ovr
, extack
);
333 RCU_INIT_POINTER(f
->next
, head
->ht
[fw_hash(handle
)]);
334 rcu_assign_pointer(head
->ht
[fw_hash(handle
)], f
);
340 tcf_exts_destroy(&f
->exts
);
345 static void fw_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
348 struct fw_head
*head
= rtnl_dereference(tp
->root
);
357 for (h
= 0; h
< HTSIZE
; h
++) {
360 for (f
= rtnl_dereference(head
->ht
[h
]); f
;
361 f
= rtnl_dereference(f
->next
)) {
362 if (arg
->count
< arg
->skip
) {
366 if (arg
->fn(tp
, f
, arg
) < 0) {
375 static int fw_dump(struct net
*net
, struct tcf_proto
*tp
, void *fh
,
376 struct sk_buff
*skb
, struct tcmsg
*t
, bool rtnl_held
)
378 struct fw_head
*head
= rtnl_dereference(tp
->root
);
379 struct fw_filter
*f
= fh
;
385 t
->tcm_handle
= f
->id
;
387 if (!f
->res
.classid
&& !tcf_exts_has_actions(&f
->exts
))
390 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
392 goto nla_put_failure
;
394 if (f
->res
.classid
&&
395 nla_put_u32(skb
, TCA_FW_CLASSID
, f
->res
.classid
))
396 goto nla_put_failure
;
398 struct net_device
*dev
;
399 dev
= __dev_get_by_index(net
, f
->ifindex
);
400 if (dev
&& nla_put_string(skb
, TCA_FW_INDEV
, dev
->name
))
401 goto nla_put_failure
;
403 if (head
->mask
!= 0xFFFFFFFF &&
404 nla_put_u32(skb
, TCA_FW_MASK
, head
->mask
))
405 goto nla_put_failure
;
407 if (tcf_exts_dump(skb
, &f
->exts
) < 0)
408 goto nla_put_failure
;
410 nla_nest_end(skb
, nest
);
412 if (tcf_exts_dump_stats(skb
, &f
->exts
) < 0)
413 goto nla_put_failure
;
418 nla_nest_cancel(skb
, nest
);
422 static void fw_bind_class(void *fh
, u32 classid
, unsigned long cl
, void *q
,
425 struct fw_filter
*f
= fh
;
427 if (f
&& f
->res
.classid
== classid
) {
429 __tcf_bind_filter(q
, &f
->res
, base
);
431 __tcf_unbind_filter(q
, &f
->res
);
435 static struct tcf_proto_ops cls_fw_ops __read_mostly
= {
437 .classify
= fw_classify
,
439 .destroy
= fw_destroy
,
445 .bind_class
= fw_bind_class
,
446 .owner
= THIS_MODULE
,
449 static int __init
init_fw(void)
451 return register_tcf_proto_ops(&cls_fw_ops
);
454 static void __exit
exit_fw(void)
456 unregister_tcf_proto_ops(&cls_fw_ops
);
461 MODULE_LICENSE("GPL");