Linux 4.1.16
net/openvswitch/flow_table.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
19 #include "flow.h"
20 #include "datapath.h"
21 #include <linux/uaccess.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <net/llc_pdu.h>
27 #include <linux/kernel.h>
28 #include <linux/jhash.h>
29 #include <linux/jiffies.h>
30 #include <linux/llc.h>
31 #include <linux/module.h>
32 #include <linux/in.h>
33 #include <linux/rcupdate.h>
34 #include <linux/if_arp.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/sctp.h>
38 #include <linux/tcp.h>
39 #include <linux/udp.h>
40 #include <linux/icmp.h>
41 #include <linux/icmpv6.h>
42 #include <linux/rculist.h>
43 #include <net/ip.h>
44 #include <net/ipv6.h>
45 #include <net/ndisc.h>
47 #define TBL_MIN_BUCKETS 1024
48 #define REHASH_INTERVAL (10 * 60 * HZ)
50 static struct kmem_cache *flow_cache;
51 struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}
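/* Editorial note: the word-at-a-time loop above is safe because struct
 * sw_flow_key is sized and aligned to a multiple of sizeof(long); the
 * BUILD_BUG_ON() checks in ovs_flow_init() below enforce exactly that.
 */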
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;
        int node;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;
        flow->id.unmasked_key = NULL;
        flow->id.ufid_len = 0;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        for_each_node(node)
                if (node != 0)
                        RCU_INIT_POINTER(flow->stats[node], NULL);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}
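/* Editorial note: only the node-0 stats block is preallocated here; the
 * other per-NUMA-node pointers start out NULL and, in this kernel version,
 * appear to be filled in lazily on the first write from another node (see
 * ovs_flow_stats_update() in flow.c).  That keeps flows that are only ever
 * touched from one node cheap on large machines.
 */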
int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}
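/* Editorial note: a flex_array is used for the bucket array because a table
 * can grow to many thousands of buckets.  flex_array backs the array with
 * individual page-sized parts, so growing the table never requires a large
 * high-order allocation that could fail on a fragmented system.
 */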
static void flow_free(struct sw_flow *flow)
{
        int node;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}
static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);

        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}
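/* Editorial note: each table instance gets its own random hash_seed, which
 * find_bucket() folds into every bucket lookup via jhash_1word().  Because
 * rehashing builds a brand-new instance, flows are periodically
 * redistributed under a fresh seed, which helps defend against crafted
 * keys that would otherwise pile up in a single bucket.
 */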
int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);

        if (!ti)
                return -ENOMEM;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
        return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
{
        int i;

        if (!ti)
                return;

        BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
}
/* No need for locking; this function is called from an RCU callback or the
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        table_instance_destroy(ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
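/* Editorial note: (*bucket, *last) is an iteration cursor owned by the
 * caller: *bucket is the next hash bucket to visit and *last counts how
 * many entries of that bucket have already been returned.  The netlink
 * flow-dump path uses this to resume a dump across multiple calls without
 * keeping any state in the table itself.
 */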
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                        (hash & (ti->n_buckets - 1)));
}
static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}
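/* Editorial note: each flow embeds two hlist_node slots per table
 * (node[0] and node[1]).  During a rehash the flow is linked into the new
 * instance under !old->node_ver while it stays reachable in the old one,
 * so concurrent RCU readers always see a complete table.  Setting
 * keep_flows tells table_instance_destroy() that the flows are now shared
 * with the new instance and must not be freed with the old one.
 */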
static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}
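/* Editorial note: when a flow carries no tunnel metadata
 * (tun_key.ipv4_dst == 0), comparison can skip the tunnel portion at the
 * front of struct sw_flow_key and start at the 'phy' member.  The offset
 * is rounded down to long alignment so the word-sized compare loop in
 * cmp_key() below stays aligned.
 */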
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}
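/* Editorial note: lookup cost is linear in the number of masks; the packet
 * key is masked and hashed once per mask until a match is found.
 * *n_mask_hit records how many masks were tried, and the datapath exports
 * that counter in its megaflow statistics so userspace can judge how well
 * its flow wildcarding is working.
 */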
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
        }
        return NULL;
}
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}
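/* Editorial note: masks are reference-counted because many flows typically
 * share one mask; that sharing is what makes megaflows effective.  The mask
 * is unlinked with list_del_rcu() and freed with kfree_rcu() only when the
 * last flow using it goes away, so concurrent lookups walking the mask
 * list under RCU never see freed memory.
 */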
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;
                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;
        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}
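/* Editorial note on the growth policy: the table doubles its bucket count
 * when the flow count exceeds it (load factor 1), and is otherwise rehashed
 * at the same size every REHASH_INTERVAL (ten minutes) to pick up a fresh
 * hash seed.  If the allocation for the new instance fails, new_ti stays
 * NULL and the table simply keeps its current size; correctness never
 * depends on the resize succeeding.
 */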
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (num_possible_nodes()
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}
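/* Editorial note: the "sw_flow" cache object is sized as
 * sizeof(struct sw_flow) plus one trailing flow_stats pointer per possible
 * NUMA node; flow->stats[] appears to be a flexible array member at the end
 * of struct sw_flow, which is why ovs_flow_alloc() can index it by node
 * number even though the array has no declared length.
 */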
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}