/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

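/* Copy 'src' into 'dst' while applying 'mask': the key is AND-ed with
 * the mask one long at a time, so both buffers must be long-aligned.
 */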
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

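/* Allocate a zeroed flow along with the stats node for CPU 0.  Stats
 * nodes for other CPUs are allocated on demand by the stats-update
 * path (outside this file), which then marks them in 'cpu_used_mask'.
 */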
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

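/* The bucket array is a flex_array so that large tables do not require
 * high-order contiguous allocations.
 */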
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

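/* Free a flow's unmasked key, its actions and its per-CPU stats nodes.
 * Only CPUs marked in 'cpu_used_mask' ever allocated a stats node.
 */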
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking this function is called from RCU callback or
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

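/* Resumable iteration for netlink flow dumps: '*bucket' and '*last'
 * record the position reached so far, so a dump can continue where the
 * previous call left off.
 */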
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

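/* 'n_buckets' is always a power of two, so masking with (n_buckets - 1)
 * selects a bucket; the extra jhash round mixes in the per-instance
 * random seed, so a rehash redistributes entries across buckets.
 */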
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

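/* Each flow carries two hlist nodes per table so it can sit in the old
 * and the new instance at once during a rehash; flipping 'node_ver'
 * selects which node the new instance links through.
 */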
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

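/* Hash only the bytes within 'range'; the caller has already masked the
 * key, so equal masked keys hash equally regardless of the rest.
 */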
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure number of hash bytes are multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

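/* Compare a whole long at a time and OR the differences together, so
 * the loop stays branch-free until the single test at the end.
 */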
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

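/* Look up 'unmasked' under a single mask: apply the mask, hash the
 * masked range, then scan one bucket comparing mask, hash and key.
 */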
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

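/* Flows installed with a unified flow ID (UFID) are also indexed in a
 * second table instance keyed by that ID, so they can be found without
 * knowing the flow key.
 */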
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}