/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_keys.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
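
/*
 * Illustrative note (not from the original source): each flow_get_*()
 * helper below reduces one aspect of the packet to a u32 key.  When the
 * packet carries no usable header field for a key, the helpers fall back
 * to folding kernel pointers (the socket or the dst entry) with
 * addr_fold(), so packets of the same flow still map to a stable value.
 */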

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->src)
		return ntohl(flow->src);
	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->dst)
		return ntohl(flow->dst);
	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports)
		return ntohs(flow->port16[0]);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports)
		return ntohs(flow->port16[1]);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
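
/*
 * Clarifying note (not part of the original source): CTTUPLE() is a GNU
 * statement expression that looks up the conntrack entry attached to the
 * skb and evaluates to the requested tuple member.  If no conntrack entry
 * exists (or conntrack is compiled out), it jumps to a local "fallback:"
 * label, so every caller must provide one and fall back to a plain
 * header-based key.  Roughly, a call expands as:
 *
 *	ntohl(CTTUPLE(skb, src.u3.ip));
 *	    -> ct = nf_ct_get(skb, &ctinfo);
 *	       if (ct == NULL) goto fallback;
 *	       use ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.src.u3.ip
 */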

static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}
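
/*
 * Note (added for clarity): the "rxhash" key reuses the kernel's generic
 * skb hash via skb_get_hash(), so it reflects whatever the device or the
 * software flow dissector already computed for this packet rather than a
 * classifier-private hash.
 */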

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |			\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
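
/*
 * Note (added for clarity): FLOW_KEYS_NEEDED marks the keys whose lookup
 * reads addresses, ports or the IP protocol out of the dissected headers,
 * i.e. the only keys for which flow_classify() needs to run
 * skb_flow_dissect().  Purely skb- or socket-derived keys (mark, priority,
 * iif, uid/gid, ...) are left out so such filters skip the dissection cost.
 */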

static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect(skb, &flow_keys);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
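
/*
 * Worked example (added, illustrative only): in FLOW_MODE_MAP with a single
 * key, the class index is derived directly from that key:
 *
 *	classid = (key & mask) ^ xor;
 *	classid = (classid >> rshift) + addend;
 *	if (divisor)
 *		classid %= divisor;
 *	res->classid = TC_H_MAKE(baseclass, baseclass + classid);
 *
 * e.g. key = 0x0a000107 (10.0.1.7), mask = 0xff, xor = 0, rshift = 0,
 * addend = 0, divisor = 0, baseclass = 1:1 maps the packet to class 1:8
 * (minor id 1 + 7).  In FLOW_MODE_HASH the same post-processing is applied
 * to jhash2() over all selected keys instead of a single key.
 */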

static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
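
/*
 * Usage sketch (added, not in the original file; exact iproute2 syntax may
 * vary between versions): these attributes correspond to the options of the
 * "flow" filter in tc, e.g.
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst,proto-src,proto-dst \
 *		divisor 1024 perturb 60 baseclass 1:1
 *
 * which selects FLOW_MODE_HASH over four keys, folds the hash into 1024
 * classes starting at 1:1 and re-seeds the hash every 60 seconds.
 */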

static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;
		tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}

static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
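
/*
 * Note (added for clarity): flow_walk() follows the usual tcf_walker
 * protocol: the first arg->skip filters are skipped, arg->fn() is invoked
 * for each remaining filter, a negative return stops the walk and sets
 * arg->stop, and arg->count tracks how many filters were visited either way.
 */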

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");