/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

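/* Initial number of hash buckets in each table instance, and how long an
 * instance keeps its hash seed before the table is opportunistically
 * re-hashed on insertion (see flow_key_insert() below).
 */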
#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

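/* Mask 'src' into 'dst': each long-sized word of the key is AND-ed with the
 * corresponding word of 'mask->key', starting at 'mask->range.start' (or at
 * offset 0 when 'full' is set).
 */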
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}

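/* Allocate a new flow together with a zeroed statistics block for node 0 (or
 * the first online node).  Statistics pointers for all other NUMA nodes start
 * out NULL; presumably they are allocated on demand by the stats update path.
 */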
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;
        int node;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;
        flow->id.unmasked_key = NULL;
        flow->id.ufid_len = 0;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        for_each_node(node)
                if (node != 0)
                        RCU_INIT_POINTER(flow->stats[node], NULL);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void flow_free(struct sw_flow *flow)
{
        int node;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);
        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

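/* A flow table keeps two table instances: 'ti' hashes flows by their masked
 * key, while 'ufid_ti' hashes the same flows by unique flow identifier (UFID)
 * so that flows installed with a UFID can be looked up without a key.
 */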
int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ti)
                return -ENOMEM;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
        return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
{
        int i;

        if (!ti)
                return;

        BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
}

/* No need for locking; this function is called from an RCU callback or from
 * an error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        table_instance_destroy(ti, ufid_ti, false);
}

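/* Iterator used when dumping the flow table: '*bucket' and '*last' record the
 * current bucket and the position within that bucket, so repeated calls walk
 * every flow exactly once and can resume where the previous call stopped.
 */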
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                                (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

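/* Each flow carries two hlist nodes per table (indexed by 'node_ver'), so a
 * flow can sit in both the old and the new table instance while a rehash is
 * in progress.  Copying into the new instance flips 'node_ver' and marks the
 * old instance with 'keep_flows' so its flows are not freed when it is
 * destroyed.
 */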
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}

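/* Hash only the bytes of the key that fall inside 'range', as a sequence of
 * u32 words; keys are masked before hashing so equal masked keys hash alike.
 */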
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}

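/* Byte offset at which key comparison starts: flows with a tunnel key are
 * compared from offset 0, all others from the 'phy' member rounded down to a
 * long boundary, skipping the unused tunnel portion of the key.
 */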
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}

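/* Packet lookup: try each mask on the list in turn until a masked lookup
 * hits.  '*n_mask_hit' counts how many masks were tried, presumably so the
 * caller can account for the cost of the lookup.
 */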
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
        }
        return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}

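/* Look a flow up by its unique flow identifier (UFID) in the dedicated UFID
 * table instance; only flows installed with a UFID appear there.
 */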
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;

                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (num_possible_nodes()
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}