/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
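/*
 * Filter handles encode the match in 32 bits: the low byte carries the
 * "to" realm (bit 15 set means "to ANY"), and the upper 16 bits carry
 * either the "from" realm or, with bit 31 set, an interface id; see
 * to_hash()/from_hash() below.  An illustrative invocation (not part of
 * this file; assumes realms were assigned with "ip route ... realm N"):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 100 \
 *		route from 1 to 2 classid 1:10
 */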
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};
struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};
struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
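/*
 * The fastmap is a small direct-mapped cache of recent
 * (id, iif) -> filter lookups.  ROUTE4_FAILURE is stored there as
 * well, so repeated lookups that match no filter also return quickly.
 */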
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
static DEFINE_SPINLOCK(fastmap_lock);

static void route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}
static void route4_set_fastmap(struct route4_head *head, u32 id, int iif,
			       struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
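/*
 * Lookup order below: the exact "to" bucket is searched before the
 * wildcard bucket (index 256), and within a bucket the FROM chain is
 * walked before the IIF chain and the wildcard chain, implementing
 * rule 3 above.
 */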
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);
	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);
	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}
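/*
 * to_hash()/from_hash() map the two halves of a filter handle onto the
 * 257-entry head table and the 33-entry per-bucket chain array used by
 * the RTNL-protected code below.
 */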
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}
static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}
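/*
 * Filters are freed after an RCU grace period via tcf_queue_work()
 * whenever tcf_exts_get_net() succeeds; otherwise (the netns is already
 * gone) they are freed synchronously.
 */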
static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);

	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}
static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}
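/*
 * route4_delete() unlinks one filter and reports via *last whether the
 * whole head table is now empty, letting the core destroy the tp.
 */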
static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might reference
			 * the filter; we unlinked it above, so it cannot
			 * re-enter the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
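/*
 * route4_set_parms() recomputes the filter handle (nhandle) from the
 * TO/FROM/IIF attributes using the encoding described at the top of
 * this file, and rejects a user-supplied handle that disagrees with
 * the recomputed one.
 */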
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					/* unlink the old filter from its
					 * previous chain
					 */
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
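/*
 * Note the replace path above is copy-insert-unlink: a new filter is
 * built and linked before the old one is removed, so RCU readers in
 * route4_classify() always see a consistent chain.
 */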
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
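/*
 * route4_bind_class() lets the scheduler layer repoint the cached
 * class reference when the class a filter maps to is replaced.
 */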
static void route4_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");