/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				       mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				       mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
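
/* Illustrative example (added commentary, not part of the original
 * file): with sizeof(long) == 8 and a mask range covering a single
 * long, the loop above computes, word by word:
 *
 *	src:  0x1122334455667788
 *	mask: 0xffffffff00000000
 *	dst:  0x1122334400000000
 *
 * Bytes of 'dst' outside 'mask->range' stay uninitialized, so callers
 * must only read 'dst' within that range.
 */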

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, deferred);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
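
/* Usage sketch (hypothetical caller, not from the original file):
 * '*bucket' and '*last' form a resumable cursor, so a dump can be
 * split across multiple calls, e.g. across Netlink dump batches:
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		emit(flow);
 *
 * where emit() stands in for the caller's per-flow action.
 */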

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}
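
/* Note on find_bucket() (added commentary): n_buckets starts at
 * TBL_MIN_BUCKETS (1024) and only ever doubles, via
 * table_instance_expand() below, so it is always a power of two and
 * 'hash & (ti->n_buckets - 1)' is equivalent to 'hash % n_buckets'.
 */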

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}
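
/* Example (added commentary): for a non-tunneled flow,
 * key->tun_key.ipv4_dst is zero, so hashing and comparison start at
 * offsetof(struct sw_flow_key, phy) rounded down to a long boundary;
 * the unused tunnel-key prefix never contributes to the flow hash.
 */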

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
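
/* Design note (added commentary): the loop XORs the keys one long at
 * a time and ORs the differences together, testing the result only
 * once at the end. Assuming long-aligned ranges, as the masking code
 * above also does, a byte-wise equivalent would be:
 *
 *	return !memcmp((const u8 *)key1 + key_start,
 *		       (const u8 *)key2 + key_start,
 *		       key_end - key_start);
 *
 * The long-wise form suits the short, aligned ranges used here.
 */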

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
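
/* Note (added commentary): lookup cost scales with the number of
 * masks in the list, since every mask may require its own hash-table
 * probe. '*n_mask_hit' reports how many masks were tried before the
 * hit (or the full list length on a miss), letting callers account
 * for per-packet lookup overhead.
 */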

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}

	return 0;
}
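
/* Sizing note (added commentary, derived from the code above): the
 * table doubles once the flow count exceeds the bucket count, keeping
 * the average hash-chain length at one entry or less. Independently,
 * a rehash every REHASH_INTERVAL (10 minutes) allocates a new
 * table_instance with a fresh hash_seed, bounding how long any
 * accidental bucket clustering can persist.
 */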

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}