1 // SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/cls_fw.c Classifier mapping ipchains' fwmark to traffic class.
 *
 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 */
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
23 #include <net/sch_generic.h>
24 #include <net/tc_wrapper.h>
/* NOTE(review): extraction-garbled fragments of two struct definitions.
 * The enclosing "struct fw_head {" / "struct fw_filter {" lines and several
 * members (e.g. the mask and id fields referenced by the functions below)
 * are missing from this view -- recover them from the original file.
 */
/* fw_head: per-tp hash table; one RCU-protected filter chain per bucket. */
30 struct fw_filter __rcu
*ht
[HTSIZE
];
/* fw_filter: next link in the bucket's singly-linked RCU chain. */
35 struct fw_filter __rcu
*next
;
/* Classification result (classid) bound to this filter. */
37 struct tcf_result res
;
/* Deferred-free work item; queued from delete/destroy paths. */
41 struct rcu_work rwork
;
44 static u32
fw_hash(u32 handle
)
46 handle
^= (handle
>> 16);
47 handle
^= (handle
>> 8);
48 return handle
% HTSIZE
;
/* NOTE(review): extraction-garbled. fw_classify is the fast-path hook:
 * it hashes the skb mark (masked by head->mask, per the full file) into
 * head->ht[], walks the RCU chain, and runs the matching filter's
 * actions. Local declarations (f, r, id), the head == NULL "old method"
 * fallback body, and the return statements are missing from this view --
 * do not rebuild from these fragments alone.
 */
51 TC_INDIRECT_SCOPE
int fw_classify(struct sk_buff
*skb
,
52 const struct tcf_proto
*tp
,
53 struct tcf_result
*res
)
55 struct fw_head
*head
= rcu_dereference_bh(tp
->root
);
/* Walk the bucket chain for the hashed mark under RCU-bh. */
63 for (f
= rcu_dereference_bh(head
->ht
[fw_hash(id
)]); f
;
64 f
= rcu_dereference_bh(f
->next
)) {
/* Skip filters bound to a different ingress device. */
67 if (!tcf_match_indev(skb
, f
->ifindex
))
69 r
= tcf_exts_exec(skb
, &f
->exts
, res
);
77 struct Qdisc
*q
= tcf_block_q(tp
->chain
->block
);
79 /* Old method: classify the packet using its skb mark. */
80 if (id
&& (TC_H_MAJ(id
) == 0 ||
81 !(TC_H_MAJ(id
^ q
->handle
)))) {
/* NOTE(review): extraction-garbled. fw_get looks up a filter by handle
 * under RTNL: hash the handle, walk the bucket chain, return the match
 * (presumably f when f->id == handle -- the comparison line and the
 * returns are missing from this view; confirm against the original).
 */
91 static void *fw_get(struct tcf_proto
*tp
, u32 handle
)
93 struct fw_head
*head
= rtnl_dereference(tp
->root
);
99 f
= rtnl_dereference(head
->ht
[fw_hash(handle
)]);
100 for (; f
; f
= rtnl_dereference(f
->next
)) {
/* NOTE(review): extraction-garbled; the function body (presumably just
 * "return 0;") and the comment terminator are missing from this view.
 * tp->root stays NULL until the first filter is added (see fw_change).
 */
107 static int fw_init(struct tcf_proto
*tp
)
109 /* We don't allocate fw_head here, because in the old method
110 * we don't need it at all.
/* Final teardown of an unlinked filter: destroy its extensions/actions,
 * then drop the netns reference taken for them. NOTE(review): the
 * surrounding braces were lost in extraction; all statements are present.
 */
115 static void __fw_delete_filter(struct fw_filter
*f
)
117 tcf_exts_destroy(&f
->exts
);
118 tcf_exts_put_net(&f
->exts
);
/* NOTE(review): extraction-garbled. RCU work callback that recovers the
 * filter from its embedded rwork via container_of and frees it; the
 * container_of continuation (", struct fw_filter, rwork);") and any
 * locking around the free are missing from this view.
 */
122 static void fw_delete_filter_work(struct work_struct
*work
)
124 struct fw_filter
*f
= container_of(to_rcu_work(work
),
128 __fw_delete_filter(f
);
/* NOTE(review): extraction-garbled. fw_destroy tears down the whole
 * classifier under RTNL: for every bucket it unlinks each filter,
 * unbinds its class, and either queues a deferred free (when a netns
 * ref could be taken) or frees immediately; the head itself is freed
 * after a grace period. Local declarations, the head == NULL early
 * return, and braces are missing from this view.
 */
132 static void fw_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
133 struct netlink_ext_ack
*extack
)
135 struct fw_head
*head
= rtnl_dereference(tp
->root
);
142 for (h
= 0; h
< HTSIZE
; h
++) {
143 while ((f
= rtnl_dereference(head
->ht
[h
])) != NULL
) {
/* Unlink the bucket head, then release the filter. */
144 RCU_INIT_POINTER(head
->ht
[h
],
145 rtnl_dereference(f
->next
));
146 tcf_unbind_filter(tp
, &f
->res
);
147 if (tcf_exts_get_net(&f
->exts
))
148 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
150 __fw_delete_filter(f
);
/* Free the table itself after readers are done. */
153 kfree_rcu(head
, rcu
);
/* NOTE(review): extraction-garbled. fw_delete unlinks one filter (arg)
 * from its hash chain under RTNL and queues its deferred free, then
 * scans all buckets to report via *last whether the table is now empty.
 * The pfp == f match test inside the chain walk, return statements, and
 * several braces are missing from this view.
 */
156 static int fw_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
157 bool rtnl_held
, struct netlink_ext_ack
*extack
)
159 struct fw_head
*head
= rtnl_dereference(tp
->root
);
160 struct fw_filter
*f
= arg
;
161 struct fw_filter __rcu
**fp
;
162 struct fw_filter
*pfp
;
166 if (head
== NULL
|| f
== NULL
)
/* Walk the bucket chain tracking the link that points at each node. */
169 fp
= &head
->ht
[fw_hash(f
->id
)];
171 for (pfp
= rtnl_dereference(*fp
); pfp
;
172 fp
= &pfp
->next
, pfp
= rtnl_dereference(*fp
)) {
/* Splice f out, unbind its class, and defer the free. */
174 RCU_INIT_POINTER(*fp
, rtnl_dereference(f
->next
));
175 tcf_unbind_filter(tp
, &f
->res
);
176 tcf_exts_get_net(&f
->exts
);
177 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
/* Any bucket still populated means this was not the last filter. */
184 for (h
= 0; h
< HTSIZE
; h
++) {
185 if (rcu_access_pointer(head
->ht
[h
])) {
195 static const struct nla_policy fw_policy
[TCA_FW_MAX
+ 1] = {
196 [TCA_FW_CLASSID
] = { .type
= NLA_U32
},
197 [TCA_FW_INDEV
] = { .type
= NLA_STRING
, .len
= IFNAMSIZ
},
198 [TCA_FW_MASK
] = { .type
= NLA_U32
},
/* NOTE(review): extraction-garbled. fw_set_parms applies the parsed
 * TCA_FW_* attributes to a filter: validates actions, resolves the
 * optional ingress device, checks the mask against head->mask (it may
 * not change after creation), and binds the classid. Error returns,
 * the "return 0", and several braces are missing from this view.
 */
201 static int fw_set_parms(struct net
*net
, struct tcf_proto
*tp
,
202 struct fw_filter
*f
, struct nlattr
**tb
,
203 struct nlattr
**tca
, unsigned long base
, u32 flags
,
204 struct netlink_ext_ack
*extack
)
206 struct fw_head
*head
= rtnl_dereference(tp
->root
);
210 err
= tcf_exts_validate(net
, tp
, tb
, tca
[TCA_RATE
], &f
->exts
, flags
,
/* Optional ingress device match: resolve name to ifindex. */
215 if (tb
[TCA_FW_INDEV
]) {
217 ret
= tcf_change_indev(net
, tb
[TCA_FW_INDEV
], extack
);
/* A supplied mask must equal the table-wide mask set at creation. */
224 if (tb
[TCA_FW_MASK
]) {
225 mask
= nla_get_u32(tb
[TCA_FW_MASK
]);
226 if (mask
!= head
->mask
)
228 } else if (head
->mask
!= 0xFFFFFFFF)
231 if (tb
[TCA_FW_CLASSID
]) {
232 f
->res
.classid
= nla_get_u32(tb
[TCA_FW_CLASSID
]);
233 tcf_bind_filter(tp
, &f
->res
, base
);
/* NOTE(review): extraction-garbled. fw_change creates or replaces a
 * filter. Replace path: clone the old filter into fnew, apply parms,
 * splice fnew into the chain in place of the old one and defer-free the
 * old. Create path: lazily allocate head (mask defaults to all-ones),
 * allocate f, apply parms, and push f onto its bucket. Error-handling
 * labels, allocation-failure checks, returns, and many braces are
 * missing from this view -- do not rebuild from these fragments.
 */
239 static int fw_change(struct net
*net
, struct sk_buff
*in_skb
,
240 struct tcf_proto
*tp
, unsigned long base
,
241 u32 handle
, struct nlattr
**tca
, void **arg
,
242 u32 flags
, struct netlink_ext_ack
*extack
)
244 struct fw_head
*head
= rtnl_dereference(tp
->root
);
245 struct fw_filter
*f
= *arg
;
246 struct nlattr
*opt
= tca
[TCA_OPTIONS
];
247 struct nlattr
*tb
[TCA_FW_MAX
+ 1];
251 return handle
? -EINVAL
: 0; /* Succeed if it is old method. */
253 err
= nla_parse_nested_deprecated(tb
, TCA_FW_MAX
, opt
, fw_policy
,
/* --- replace path: f != NULL, build a replacement node --- */
259 struct fw_filter
*pfp
, *fnew
;
260 struct fw_filter __rcu
**fp
;
262 if (f
->id
!= handle
&& handle
)
265 fnew
= kzalloc(sizeof(struct fw_filter
), GFP_KERNEL
);
270 fnew
->ifindex
= f
->ifindex
;
273 err
= tcf_exts_init(&fnew
->exts
, net
, TCA_FW_ACT
,
280 err
= fw_set_parms(net
, tp
, fnew
, tb
, tca
, base
, flags
, extack
);
282 tcf_exts_destroy(&fnew
->exts
);
/* Find the link pointing at the old filter, then swap in fnew. */
287 fp
= &head
->ht
[fw_hash(fnew
->id
)];
288 for (pfp
= rtnl_dereference(*fp
); pfp
;
289 fp
= &pfp
->next
, pfp
= rtnl_dereference(*fp
))
293 RCU_INIT_POINTER(fnew
->next
, rtnl_dereference(pfp
->next
));
294 rcu_assign_pointer(*fp
, fnew
);
295 tcf_unbind_filter(tp
, &f
->res
);
296 tcf_exts_get_net(&f
->exts
);
297 tcf_queue_work(&f
->rwork
, fw_delete_filter_work
);
/* --- create path: first filter may also allocate the head --- */
307 u32 mask
= 0xFFFFFFFF;
309 mask
= nla_get_u32(tb
[TCA_FW_MASK
]);
311 head
= kzalloc(sizeof(*head
), GFP_KERNEL
);
316 rcu_assign_pointer(tp
->root
, head
);
319 f
= kzalloc(sizeof(struct fw_filter
), GFP_KERNEL
);
323 err
= tcf_exts_init(&f
->exts
, net
, TCA_FW_ACT
, TCA_FW_POLICE
);
329 err
= fw_set_parms(net
, tp
, f
, tb
, tca
, base
, flags
, extack
);
/* Publish: link first, then make the bucket head visible. */
333 RCU_INIT_POINTER(f
->next
, head
->ht
[fw_hash(handle
)]);
334 rcu_assign_pointer(head
->ht
[fw_hash(handle
)], f
);
/* Error unwind (label lost in extraction). */
340 tcf_exts_destroy(&f
->exts
);
/* NOTE(review): extraction-garbled. fw_walk iterates every filter in
 * every bucket under RTNL and feeds each to the walker via
 * tc_cls_stats_dump, stopping when it returns false. The second
 * parameter line, local declarations (h, f), the head == NULL early
 * return, and braces are missing from this view.
 */
345 static void fw_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
348 struct fw_head
*head
= rtnl_dereference(tp
->root
);
357 for (h
= 0; h
< HTSIZE
; h
++) {
360 for (f
= rtnl_dereference(head
->ht
[h
]); f
;
361 f
= rtnl_dereference(f
->next
)) {
362 if (!tc_cls_stats_dump(tp
, arg
, f
))
/* NOTE(review): extraction-garbled. fw_dump serializes one filter into
 * a netlink message: sets tcm_handle, opens a TCA_OPTIONS nest, emits
 * classid / indev / mask when present, dumps extensions, closes the
 * nest, then dumps action stats. Local declarations (nest), the "skip
 * empty filter" early return value, the success return, the
 * nla_put_failure label, and braces are missing from this view.
 */
368 static int fw_dump(struct net
*net
, struct tcf_proto
*tp
, void *fh
,
369 struct sk_buff
*skb
, struct tcmsg
*t
, bool rtnl_held
)
371 struct fw_head
*head
= rtnl_dereference(tp
->root
);
372 struct fw_filter
*f
= fh
;
378 t
->tcm_handle
= f
->id
;
/* Nothing worth dumping for a classid-less, action-less filter. */
380 if (!f
->res
.classid
&& !tcf_exts_has_actions(&f
->exts
))
383 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
385 goto nla_put_failure
;
387 if (f
->res
.classid
&&
388 nla_put_u32(skb
, TCA_FW_CLASSID
, f
->res
.classid
))
389 goto nla_put_failure
;
391 struct net_device
*dev
;
392 dev
= __dev_get_by_index(net
, f
->ifindex
);
393 if (dev
&& nla_put_string(skb
, TCA_FW_INDEV
, dev
->name
))
394 goto nla_put_failure
;
/* Only a non-default mask is worth emitting. */
396 if (head
->mask
!= 0xFFFFFFFF &&
397 nla_put_u32(skb
, TCA_FW_MASK
, head
->mask
))
398 goto nla_put_failure
;
400 if (tcf_exts_dump(skb
, &f
->exts
) < 0)
401 goto nla_put_failure
;
403 nla_nest_end(skb
, nest
);
405 if (tcf_exts_dump_stats(skb
, &f
->exts
) < 0)
406 goto nla_put_failure
;
/* Failure path: roll back the partially-built nest. */
411 nla_nest_cancel(skb
, nest
);
/* NOTE(review): extraction-garbled. Re-binds the class reference cached
 * in a filter's tcf_result when the pointed-to class changes. The final
 * parameter declaration (presumably "unsigned long base" -- "base" is
 * used below but its declaration line is missing from this view) and
 * the braces were lost in extraction; confirm against the original.
 */
415 static void fw_bind_class(void *fh
, u32 classid
, unsigned long cl
, void *q
,
418 struct fw_filter
*f
= fh
;
420 tc_cls_bind_class(classid
, cl
, q
, &f
->res
, base
);
/* NOTE(review): extraction-garbled ops table registering the "fw"
 * classifier with the TC core. Several members visible elsewhere in
 * this file (.kind, .init, .get, .change, .delete, .walk, .dump and the
 * closing "};") are missing from this view.
 */
423 static struct tcf_proto_ops cls_fw_ops __read_mostly
= {
425 .classify
= fw_classify
,
427 .destroy
= fw_destroy
,
433 .bind_class
= fw_bind_class
,
434 .owner
= THIS_MODULE
,
436 MODULE_ALIAS_NET_CLS("fw");
438 static int __init
init_fw(void)
440 return register_tcf_proto_ops(&cls_fw_ops
);
443 static void __exit
exit_fw(void)
445 unregister_tcf_proto_ops(&cls_fw_ops
);
450 MODULE_DESCRIPTION("SKB mark based TC classifier");
451 MODULE_LICENSE("GPL");