// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */
#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
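
/* Apply 'mask' to 'src' and write the result into 'dst'.  sw_flow_key is
 * long-aligned (see the BUILD_BUG_ONs in ovs_flow_init()), so the AND can
 * be done one machine word at a time.
 */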
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
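
/* Allocate a flow with one pre-allocated stats node, placed on NUMA node 0
 * when that node is online.  Further per-CPU stats nodes appear to be
 * allocated on demand elsewhere (the stats update path in flow.c);
 * cpu_used_mask records which slots are in use so flow_free() can release
 * them.
 */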
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}
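
/* Allocate a table instance with 'new_size' hash buckets.  Each instance
 * gets its own random hash_seed, so bucket placement differs between the
 * old and new instances across a rehash.
 */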
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
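
/* A flow table keeps two table instances: 'ti' indexes flows by masked
 * flow key, while 'ufid_ti' indexes the same flows by unique flow
 * identifier (UFID) for lookups that bypass key matching.
 */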
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}
/* No need for locking: this function is called only from an RCU callback
 * or from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
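
/* Mix the precomputed flow hash with this instance's random seed before
 * picking a bucket.  n_buckets is always a power of two (TBL_MIN_BUCKETS
 * doubled on expansion), so the mask below selects a valid bucket index.
 */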
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}
static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
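
/* Each flow embeds two hlist nodes per table ('node[0]' and 'node[1]').
 * The new instance uses the opposite node_ver from the old one, so a flow
 * can sit in both instances at once while RCU readers drain from the old
 * table.  Setting keep_flows tells table_instance_destroy() not to free
 * the flows when the old instance is torn down.
 */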
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}
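
/* Hash only the bytes of 'key' that fall inside 'range', treated as an
 * array of u32s.  Lookup and comparison for a given mask operate on this
 * same range, so hashing just the range is sufficient.
 */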
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure number of hash bytes are multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}
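
/* Compare [key_start, key_end) of two keys one machine word at a time,
 * OR-ing together the XOR of each pair of words; the keys are equal iff
 * no bit ever differed.
 */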
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}
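
/* Megaflow lookup: try each mask on the list in turn until a masked
 * lookup hits.  '*n_mask_hit' returns how many masks were tried, which
 * callers can use to account the cost of the lookup.
 */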
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}
static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
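
/* Flows with identical masks share a single refcounted sw_flow_mask: see
 * flow_mask_insert() below, which either reuses an existing entry on the
 * mask list or allocates a new one.
 */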
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
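
/* The key table is grown when it holds more flows than buckets, and is
 * otherwise rehashed at the same size (picking up a fresh hash_seed)
 * every REHASH_INTERVAL, so that long bucket chains from unlucky hash
 * distributions do not persist.
 */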
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}