// SPDX-License-Identifier: GPL-2.0-only

/* PIPAPO: PIle PAcket POlicies: set for arbitrary concatenations of ranges
 *
 * Copyright (c) 2019-2020 Red Hat GmbH
 *
 * Author: Stefano Brivio <sbrivio@redhat.com>
 */
/**
 * DOC: Theory of Operation
 *
 * Match packet bytes against entries composed of ranged or non-ranged packet
 * field specifiers, mapping them to arbitrary references. For example:
 *
 * ::
 *
 *               | [net],[port],[net]... => [reference]
 *      entries  | [net],[port],[net]... => [reference]
 *               | [net],[port],[net]... => [reference]
 *
 * where [net] fields can be IP ranges or netmasks, and [port] fields are port
 * ranges. Arbitrary packet fields can be matched.
 *
 * This algorithm is loosely inspired by [Ligatti 2010], and fundamentally
 * relies on the consideration that every contiguous range in a space of b bits
 * can be converted into b * 2 netmasks, from Theorem 3 in [Rottenstreich 2010],
 * as also illustrated in Section 9 of [Kogan 2014].
 *
 * Classification against a number of entries, that require matching given bits
 * of a packet field, is performed by grouping those bits in sets of arbitrary
 * size, and classifying packet bits one group at a time.
 *
 *   Example: to match the source port (16 bits) of a packet, we can divide
 *   those 16 bits in 4 groups of 4 bits each. Given the entry:
 *      0000 0001 0101 1001
 *   and a packet with source port:
 *      0000 0001 0110 1001
 *   first and second groups match, but the third doesn't. We conclude that the
 *   packet doesn't match the given entry.
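 *
 *   As a minimal, self-contained C sketch of this group-by-group idea (an
 *   illustration only, with made-up names; the actual lookup path uses
 *   per-group bucket tables rather than direct comparison):
 *
 * ::
 *
 *	// Compare a 16-bit value against an entry, 4 bits at a time,
 *	// stopping at the first group that differs.
 *	static bool groups_match(u16 entry, u16 packet)
 *	{
 *		int g;
 *
 *		for (g = 0; g < 4; g++) {
 *			int shift = 12 - g * 4;
 *
 *			if (((entry >> shift) & 0xf) !=
 *			    ((packet >> shift) & 0xf))
 *				return false;
 *		}
 *
 *		return true;
 *	}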
 *
 * Translate the set to a sequence of lookup tables, one per field. Each table
 * has two dimensions: bit groups to be matched for a single packet field, and
 * all the possible values of said groups (buckets). Input entries are
 * represented as one or more rules, depending on the number of composing
 * netmasks for the given field specifier, and a group match is indicated as a
 * set bit, with number corresponding to the rule index, in all the buckets
 * whose value matches the entry for a given group.
 *
 * Rules are mapped between fields through an array of x, n pairs, with each
 * item mapping a matched rule to one or more rules. The position of the pair in
 * the array indicates the matched rule to be mapped to the next field, x
 * indicates the first rule index in the next field, and n the amount of
 * next-field rules the current rule maps to.
 *
 * The mapping array for the last field maps to the desired references.
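 *
 * Conceptually, each mapping array item is just a (first, count) pair; a
 * rough sketch of following one matched rule into the next field (names here
 * are illustrative, not the actual &union nft_pipapo_map_bucket layout):
 *
 * ::
 *
 *	struct map_pair {
 *		unsigned int to;	// first rule index in next field
 *		unsigned int n;		// number of next-field rules
 *	};
 *
 *	// Set, in the next field's bitmap, all rules mapped from rule r
 *	static void map_rule(const struct map_pair *mt, unsigned int r,
 *			     unsigned long *next_map)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < mt[r].n; i++)
 *			__set_bit(mt[r].to + i, next_map);
 *	}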
 *
 * To match, we perform table lookups using the values of grouped packet bits,
 * and use a sequence of bitwise operations to progressively evaluate rule
 * matching.
 *
 * A stand-alone, reference implementation, also including notes about possible
 * future optimisations, is available at:
 *      https://pipapo.lameexcu.se/
 *
 * Insertion
 * ---------
 *
 * - For each packet field:
 *
 *   - divide the b packet bits we want to classify into groups of size t,
 *     obtaining ceil(b / t) groups
 *
 *      Example: match on destination IP address, with t = 4: 32 bits, 8 groups
 *      of 4 bits each
 *
 *   - allocate a lookup table with one column ("bucket") for each possible
 *     value of a group, and with one row for each group
 *
 *      Example: 8 groups, 2^4 buckets:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0
 *        1
 *        2
 *        3
 *        4
 *        5
 *        6
 *        7
 *
 *   - map the bits we want to classify for the current field, for a given
 *     entry, to a single rule for non-ranged and netmask set items, and to one
 *     or multiple rules for ranges. Ranges are expanded to composing netmasks
 *     by pipapo_expand().
 *
 *      Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048
 *      - rule #0: 10.0.0.5
 *      - rule #1: 192.168.1.0/24
 *      - rule #2: 192.168.2.0/31
 *
 *   - insert references to the rules in the lookup table, selecting buckets
 *     according to bit values of a rule in the given group. This is done by
 *     pipapo_insert().
 *
 *      Example: given:
 *      - rule #0: 10.0.0.5 mapping to buckets
 *        < 0 10  0  0  0  0  0  5 >
 *      - rule #1: 192.168.1.0/24 mapping to buckets
 *        < 12  0 10  8  0  1  < 0..15 > < 0..15 > >
 *      - rule #2: 192.168.2.0/31 mapping to buckets
 *        < 12  0 10  8  0  2  0  < 0..1 > >
 *
 *      these bits are set in the lookup table:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0    0                                              1,2
 *        1   1,2                                      0
 *        2    0                                      1,2
 *        3    0                              1,2
 *        4  0,1,2
 *        5    0   1   2
 *        6  0,1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *        7   1,2 1,2   1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
 *
 *   - if this is not the last field in the set, fill a mapping array that maps
 *     rules from the lookup table to rules belonging to the same entry in
 *     the next lookup table, done by pipapo_map().
 *
 *      Note that as rules map to contiguous ranges of rules, given how netmask
 *      expansion and insertion is performed, &union nft_pipapo_map_bucket stores
 *      this information as pairs of first rule index, rule count.
 *
 *      Example: 2 entries, 10.0.0.5:1024 and 192.168.1.0-192.168.2.1:2048,
 *      given lookup table #0 for field 0 (see example above):
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0    0                                              1,2
 *        1   1,2                                      0
 *        2    0                                      1,2
 *        3    0                              1,2
 *        4  0,1,2
 *        5    0   1   2
 *        6  0,1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *        7   1,2 1,2   1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
 *
 *      and lookup table #1 for field 1 with:
 *      - rule #0: 1024 mapping to buckets
 *        < 0  0  4  0 >
 *      - rule #1: 2048 mapping to buckets
 *        < 0  0  5  0 >
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0   0,1
 *        1   0,1
 *        2                    0   1
 *        3   0,1
 *
 *      we need to map rules for 10.0.0.5 in lookup table #0 (rule #0) to 1024
 *      in lookup table #1 (rule #0) and rules for 192.168.1.0-192.168.2.1
 *      (rules #1, #2) to 2048 in lookup table #1 (rule #1):
 *
 * ::
 *
 *       rule indices in current field: 0    1    2
 *       map to rules in next field:    0    1    1
 *
 *   - if this is the last field in the set, fill a mapping array that maps
 *     rules from the last lookup table to element pointers, also done by
 *     pipapo_map().
 *
 *      Note that, in this implementation, we have two elements (start, end) for
 *      each entry. The pointer to the end element is stored in this array, and
 *      the pointer to the start element is linked from it.
 *
 *      Example: entry 10.0.0.5:1024 has a corresponding &struct nft_pipapo_elem
 *      pointer, 0x66, and element for 192.168.1.0-192.168.2.1:2048 is at 0x42.
 *      From the rules of lookup table #1 as mapped above:
 *
 * ::
 *
 *       rule indices in last field:    0      1
 *       map to elements:             0x66   0x42
 *
 * Matching
 * --------
 *
 * We use a result bitmap, with the size of a single lookup table bucket, to
 * represent the matching state that applies at every algorithm step. This is
 * done by pipapo_lookup().
 *
 * - For each packet field:
 *
 *   - start with an all-ones result bitmap (res_map in pipapo_lookup())
 *
 *   - perform a lookup into the table corresponding to the current field,
 *     for each group, and at every group, AND the current result bitmap with
 *     the value from the lookup table bucket
 *
 * ::
 *
 *      Example: 192.168.1.5 < 12 0 10 8 0 1 0 5 >, with lookup table from
 *      insertion examples.
 *      Lookup table buckets are at least 3 bits wide, we'll assume 8 bits for
 *      convenience in this example. Initial result bitmap is 0xff, the steps
 *      below show the value of the result bitmap after each group is processed:
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0    0                                              1,2
 *      result bitmap is now: 0xff & 0x6 [bucket 12] = 0x6
 *
 *        1   1,2                                      0
 *      result bitmap is now: 0x6 & 0x6 [bucket 0] = 0x6
 *
 *        2    0                                      1,2
 *      result bitmap is now: 0x6 & 0x6 [bucket 10] = 0x6
 *
 *        3    0                              1,2
 *      result bitmap is now: 0x6 & 0x6 [bucket 8] = 0x6
 *
 *        4  0,1,2
 *      result bitmap is now: 0x6 & 0x7 [bucket 0] = 0x6
 *
 *        5    0   1   2
 *      result bitmap is now: 0x6 & 0x2 [bucket 1] = 0x2
 *
 *        6  0,1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *      result bitmap is now: 0x2 & 0x7 [bucket 0] = 0x2
 *
 *        7   1,2 1,2   1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
 *      final result bitmap for this field is: 0x2 & 0x3 [bucket 5] = 0x2
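 *
 *   A compact sketch of this AND-accumulation over groups, assuming 4-bit
 *   groups and single-long buckets for brevity (an illustration only; the
 *   real buckets span several longs each):
 *
 * ::
 *
 *	static unsigned long match_field(const unsigned long *lt, int groups,
 *					 const u8 *pkt)
 *	{
 *		unsigned long res = ~0UL;	// all-ones initial bitmap
 *		int g;
 *
 *		for (g = 0; g < groups; g++) {
 *			// upper nibble for even groups, lower for odd ones
 *			u8 v = (g & 1) ? pkt[g / 2] & 0xf : pkt[g / 2] >> 4;
 *
 *			res &= lt[g * 16 + v];	// 16 buckets per group
 *		}
 *
 *		return res;
 *	}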
 *
 *   - at the next field, start with a new, all-zeroes result bitmap. For each
 *     bit set in the previous result bitmap, fill the new result bitmap
 *     (fill_map in pipapo_lookup()) with the rule indices from the
 *     corresponding buckets of the mapping field for this field, done by
 *     pipapo_refill()
 *
 *      Example: with mapping table from insertion examples, with the current
 *      result bitmap from the previous example, 0x02:
 *
 * ::
 *
 *       rule indices in current field: 0    1    2
 *       map to rules in next field:    0    1    1
 *
 *      the new result bitmap will be 0x02: rule 1 was set, and rule 1 will be
 *      set, as well, in the next lookup table
 *
 *      We can now extend this example to cover the second iteration of the step
 *      above (lookup and AND bitmap): assuming the port field is
 *      2048 < 0  0  5  0 >, with starting result bitmap 0x2, and lookup table
 *      for "port" field from pre-computation example:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0   0,1
 *        1   0,1
 *        2                    0   1
 *        3   0,1
 *
 *      operations are: 0x2 & 0x3 [bucket 0] & 0x3 [bucket 0] & 0x2 [bucket 5]
 *      & 0x3 [bucket 0], resulting bitmap is 0x2.
 *
 *   - if this is the last field in the set, look up the value from the mapping
 *     array corresponding to the final result bitmap
 *
 *      Example: 0x2 resulting bitmap from 192.168.1.5:2048, mapping array for
 *      last field from insertion example:
 *
 * ::
 *
 *       rule indices in last field:    0      1
 *       map to elements:             0x66   0x42
 *
 *      the matching element is at 0x42.
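 *
 *   The last step is then a plain array lookup; as a sketch (with the same
 *   illustrative names as above, not the actual code), for the first bit set
 *   in the final result bitmap:
 *
 * ::
 *
 *	b = find_first_bit(res, rules);	// bit 1 for bitmap 0x2
 *	if (b < rules)
 *		return mt[b].e;		// element pointer, 0x42 above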
 *
 * References
 * ----------
 *
 * [Ligatti 2010]
 *      A Packet-classification Algorithm for Arbitrary Bitmask Rules, with
 *      Automatic Time-space Tradeoffs
 *      Jay Ligatti, Josh Kuhn, and Chris Gage.
 *      Proceedings of the IEEE International Conference on Computer
 *      Communication Networks (ICCCN), August 2010.
 *      https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
 *
 * [Rottenstreich 2010]
 *      Worst-Case TCAM Rule Expansion
 *      Ori Rottenstreich and Isaac Keslassy.
 *      2010 Proceedings IEEE INFOCOM, San Diego, CA, 2010.
 *      http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.212.4592&rep=rep1&type=pdf
 *
 * [Kogan 2014]
 *      SAX-PAC (Scalable And eXpressive PAcket Classification)
 *      Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
 *      and Patrick Eugster.
 *      Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
 *      https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <uapi/linux/netfilter/nf_tables.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "nft_set_pipapo_avx2.h"
#include "nft_set_pipapo.h"
/**
 * pipapo_refill() - For each set bit, set bits from selected mapping table item
 * @map:        Bitmap to be scanned for set bits
 * @len:        Length of bitmap in longs
 * @rules:      Number of rules in field
 * @dst:        Destination bitmap
 * @mt:         Mapping table containing bit set specifiers
 * @match_only: Find a single bit and return, don't fill
 *
 * Iteration over set bits with __builtin_ctzl(): Daniel Lemire, public domain.
 *
 * For each bit set in map, select the bucket from mapping table with index
 * corresponding to the position of the bit set. Use start bit and amount of
 * bits specified in bucket to fill region in dst.
 *
 * Return: -1 on no match, bit position on 'match_only', 0 otherwise.
 */
int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
                  unsigned long *dst,
                  const union nft_pipapo_map_bucket *mt, bool match_only)
{
        unsigned long bitset;
        unsigned int k;
        int ret = -1;

        for (k = 0; k < len; k++) {
                bitset = map[k];
                while (bitset) {
                        unsigned long t = bitset & -bitset;
                        int r = __builtin_ctzl(bitset);
                        int i = k * BITS_PER_LONG + r;

                        if (unlikely(i >= rules)) {
                                map[k] = 0;
                                return -1;
                        }

                        if (match_only) {
                                bitmap_clear(map, i, 1);
                                return i;
                        }
                        ret = 0;

                        bitmap_set(dst, mt[i].to, mt[i].n);

                        bitset ^= t;
                }
                map[k] = 0;
        }

        return ret;
}
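
/* Example of the above, not from the sources: with map = 0x5 (rules 0 and 2
 * set, len = 1, rules = 8), mt[0] = { .to = 3, .n = 2 } and
 * mt[2] = { .to = 7, .n = 1 }, calling:
 *
 *	pipapo_refill(map, 1, 8, dst, mt, false);
 *
 * sets bits 3, 4 and 7 in dst, zeroes map, and returns 0. With match_only
 * set, the call would instead clear bit 0 in map and return 0, the position
 * of the first set bit.
 */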
/**
 * nft_pipapo_lookup() - Lookup function
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @key:        nftables API element representation containing key data
 * @ext:        nftables API extension pointer, filled with matching reference
 *
 * For more details, see DOC: Theory of Operation.
 *
 * Return: true on match, false otherwise.
 */
bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
                       const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_scratch *scratch;
        unsigned long *res_map, *fill_map;
        u8 genmask = nft_genmask_cur(net);
        const struct nft_pipapo_match *m;
        const struct nft_pipapo_field *f;
        const u8 *rp = (const u8 *)key;
        unsigned int map_index;
        int i;

        local_bh_disable();

        m = rcu_dereference(priv->match);

        if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
                goto out;

        scratch = *raw_cpu_ptr(m->scratch);

        map_index = scratch->map_index;

        res_map = scratch->map + (map_index ? m->bsize_max : 0);
        fill_map = scratch->map + (map_index ? 0 : m->bsize_max);

        pipapo_resmap_init(m, res_map);

        nft_pipapo_for_each_field(f, i, m) {
                bool last = i == m->field_count - 1;
                int b;

                /* For each bit group: select lookup table bucket depending on
                 * packet bytes value, then AND bucket value
                 */
                if (likely(f->bb == 8))
                        pipapo_and_field_buckets_8bit(f, res_map, rp);
                else
                        pipapo_and_field_buckets_4bit(f, res_map, rp);
                NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;

                rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);

                /* Now populate the bitmap for the next field, unless this is
                 * the last field, in which case return the matched 'ext'
                 * pointer if any.
                 *
                 * Now res_map contains the matching bitmap, and fill_map is the
                 * bitmap for the next field.
                 */
next_match:
                b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
                                  last);
                if (b < 0) {
                        scratch->map_index = map_index;
                        local_bh_enable();

                        return false;
                }

                if (last) {
                        *ext = &f->mt[b].e->ext;
                        if (unlikely(nft_set_elem_expired(*ext) ||
                                     !nft_set_elem_active(*ext, genmask)))
                                goto next_match;

                        /* Last field: we're just returning the key without
                         * filling the initial bitmap for the next field, so the
                         * current inactive bitmap is clean and can be reused as
                         * *next* bitmap (not initial) for the next packet.
                         */
                        scratch->map_index = map_index;
                        local_bh_enable();

                        return true;
                }

                /* Swap bitmap indices: res_map is the initial bitmap for the
                 * next field, and fill_map is guaranteed to be all-zeroes at
                 * this point.
                 */
                map_index = !map_index;
                swap(res_map, fill_map);

                rp += NFT_PIPAPO_GROUPS_PADDING(f);
        }

out:
        local_bh_enable();
        return false;
}
/**
 * pipapo_get() - Get matching element reference given key data
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @m:          storage containing active/existing elements
 * @data:       Key data to be matched against existing elements
 * @genmask:    If set, check that element is active in given genmask
 * @tstamp:     timestamp to check for expired elements
 * @gfp:        the type of memory to allocate (see kmalloc).
 *
 * This is essentially the same as the lookup function, except that it matches
 * key data against the uncommitted copy and doesn't use preallocated maps for
 * bitmap results.
 *
 * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
 */
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
                                          const struct nft_set *set,
                                          const struct nft_pipapo_match *m,
                                          const u8 *data, u8 genmask,
                                          u64 tstamp, gfp_t gfp)
{
        struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
        unsigned long *res_map, *fill_map = NULL;
        const struct nft_pipapo_field *f;
        int i;

        if (m->bsize_max == 0)
                return ret;

        res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
        if (!res_map) {
                ret = ERR_PTR(-ENOMEM);
                goto out;
        }

        fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
        if (!fill_map) {
                ret = ERR_PTR(-ENOMEM);
                goto out;
        }

        pipapo_resmap_init(m, res_map);

        nft_pipapo_for_each_field(f, i, m) {
                bool last = i == m->field_count - 1;
                int b;

                /* For each bit group: select lookup table bucket depending on
                 * packet bytes value, then AND bucket value
                 */
                if (f->bb == 8)
                        pipapo_and_field_buckets_8bit(f, res_map, data);
                else
                        pipapo_and_field_buckets_4bit(f, res_map, data);
                NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;

                data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);

                /* Now populate the bitmap for the next field, unless this is
                 * the last field, in which case return the matched 'ext'
                 * pointer if any.
                 *
                 * Now res_map contains the matching bitmap, and fill_map is the
                 * bitmap for the next field.
                 */
next_match:
                b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
                                  last);
                if (b < 0)
                        goto out;

                if (last) {
                        if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
                                goto next_match;
                        if ((genmask &&
                             !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
                                goto next_match;

                        ret = f->mt[b].e;
                        goto out;
                }

                data += NFT_PIPAPO_GROUPS_PADDING(f);

                /* Swap bitmap indices: fill_map will be the initial bitmap for
                 * the next field (i.e. the new res_map), and res_map is
                 * guaranteed to be all-zeroes at this point, ready to be filled
                 * according to the next mapping table.
                 */
                swap(res_map, fill_map);
        }

out:
        kfree(fill_map);
        kfree(res_map);
        return ret;
}
/**
 * nft_pipapo_get() - Get matching element reference given key data
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem:       nftables API element representation containing key data
 * @flags:      Unused
 */
static struct nft_elem_priv *
nft_pipapo_get(const struct net *net, const struct nft_set *set,
               const struct nft_set_elem *elem, unsigned int flags)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m = rcu_dereference(priv->match);
        struct nft_pipapo_elem *e;

        e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
                       nft_genmask_cur(net), get_jiffies_64(),
                       GFP_ATOMIC);
        if (IS_ERR(e))
                return ERR_CAST(e);

        return &e->priv;
}
/**
 * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
 * @f:          Field containing mapping table
 * @old_rules:  Amount of existing mapped rules
 * @rules:      Amount of new rules to map
 *
 * Return: 0 on success, negative error code on failure.
 */
static int pipapo_realloc_mt(struct nft_pipapo_field *f,
                             unsigned int old_rules, unsigned int rules)
{
        union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
        const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
        unsigned int rules_alloc = rules;

        might_sleep();

        if (unlikely(rules == 0))
                goto out_free;

        /* growing and enough space left, no action needed */
        if (rules > old_rules && f->rules_alloc > rules)
                return 0;

        /* downsize and extra slack has not grown too large */
        if (rules < old_rules) {
                unsigned int remove = f->rules_alloc - rules;

                if (remove < (2u * extra))
                        return 0;
        }

        /* If set needs more than one page of memory for rules then
         * allocate another extra page to avoid frequent reallocation.
         */
        if (rules > extra &&
            check_add_overflow(rules, extra, &rules_alloc))
                return -EOVERFLOW;

        new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT);
        if (!new_mt)
                return -ENOMEM;

        if (old_mt)
                memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));

        if (rules > old_rules) {
                memset(new_mt + old_rules, 0,
                       (rules - old_rules) * sizeof(*new_mt));
        }

out_free:
        f->rules_alloc = rules_alloc;
        f->mt = new_mt;

        kvfree(old_mt);

        return 0;
}
/**
 * pipapo_resize() - Resize lookup or mapping table, or both
 * @f:          Field containing lookup and mapping tables
 * @old_rules:  Previous amount of rules in field
 * @rules:      New amount of rules
 *
 * Increase, decrease or maintain tables size depending on new amount of rules,
 * and copy data over. In case the new size is smaller, throw away data for
 * highest-numbered rules.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int pipapo_resize(struct nft_pipapo_field *f,
                         unsigned int old_rules, unsigned int rules)
{
        long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
        unsigned int new_bucket_size, copy;
        int group, bucket, err;

        if (rules >= NFT_PIPAPO_RULE0_MAX)
                return -ENOSPC;

        new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
        new_bucket_size = roundup(new_bucket_size,
                                  NFT_PIPAPO_ALIGN / sizeof(*new_lt));
#endif

        if (new_bucket_size == f->bsize)
                goto mt;

        if (new_bucket_size > f->bsize)
                copy = f->bsize;
        else
                copy = new_bucket_size;

        new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
                          new_bucket_size * sizeof(*new_lt) +
                          NFT_PIPAPO_ALIGN_HEADROOM,
                          GFP_KERNEL_ACCOUNT);
        if (!new_lt)
                return -ENOMEM;

        new_p = NFT_PIPAPO_LT_ALIGN(new_lt);
        old_p = NFT_PIPAPO_LT_ALIGN(old_lt);

        for (group = 0; group < f->groups; group++) {
                for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS(f->bb); bucket++) {
                        memcpy(new_p, old_p, copy * sizeof(*new_p));
                        new_p += copy;
                        old_p += copy;

                        if (new_bucket_size > f->bsize)
                                new_p += new_bucket_size - f->bsize;
                        else
                                old_p += f->bsize - new_bucket_size;
                }
        }

mt:
        err = pipapo_realloc_mt(f, old_rules, rules);
        if (err) {
                kvfree(new_lt);
                return err;
        }

        if (new_lt) {
                f->bsize = new_bucket_size;
                f->lt = new_lt;
                kvfree(old_lt);
        }

        return 0;
}
/**
 * pipapo_bucket_set() - Set rule bit in bucket given group and group value
 * @f:          Field containing lookup table
 * @rule:       Rule index
 * @group:      Group index
 * @v:          Value of bit group
 */
static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
                              int v)
{
        unsigned long *pos;

        pos = NFT_PIPAPO_LT_ALIGN(f->lt);
        pos += f->bsize * NFT_PIPAPO_BUCKETS(f->bb) * group;
        pos += f->bsize * v;

        __set_bit(rule, pos);
}
/**
 * pipapo_lt_4b_to_8b() - Switch lookup table group width from 4 bits to 8 bits
 * @old_groups: Number of current groups
 * @bsize:      Size of one bucket, in longs
 * @old_lt:     Pointer to the current lookup table
 * @new_lt:     Pointer to the new, pre-allocated lookup table
 *
 * Each bucket with index b in the new lookup table, belonging to group g, is
 * filled with the bit intersection between:
 * - bucket with index given by the upper 4 bits of b, from group g, and
 * - bucket with index given by the lower 4 bits of b, from group g + 1
 *
 * That is, given buckets from the new lookup table N(x, y) and the old lookup
 * table O(x, y), with x bucket index, and y group index:
 *
 *      N(b, g) := O(b / 16, g) & O(b % 16, g + 1)
 *
 * This ensures equivalence of the matching results on lookup.
 */
static void pipapo_lt_4b_to_8b(int old_groups, int bsize,
                               unsigned long *old_lt, unsigned long *new_lt)
{
        int g, b, i;

        for (g = 0; g < old_groups / 2; g++) {
                int src_g0 = g * 2, src_g1 = g * 2 + 1;

                for (b = 0; b < NFT_PIPAPO_BUCKETS(8); b++) {
                        int src_b0 = b / NFT_PIPAPO_BUCKETS(4);
                        int src_b1 = b % NFT_PIPAPO_BUCKETS(4);
                        int src_i0 = src_g0 * NFT_PIPAPO_BUCKETS(4) + src_b0;
                        int src_i1 = src_g1 * NFT_PIPAPO_BUCKETS(4) + src_b1;

                        for (i = 0; i < bsize; i++) {
                                *new_lt = old_lt[src_i0 * bsize + i] &
                                          old_lt[src_i1 * bsize + i];
                                new_lt++;
                        }
                }
        }
}
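
/* Worked instance of the N(b, g) := O(b / 16, g) & O(b % 16, g + 1) rule
 * above (illustrative numbers, not from the sources): new bucket 0x15 of new
 * group 0 is the intersection of old bucket 0x1 (group 0) and old bucket 0x5
 * (group 1), so it holds exactly the rules matching a first byte of 0x15.
 */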
/**
 * pipapo_lt_8b_to_4b() - Switch lookup table group width from 8 bits to 4 bits
 * @old_groups: Number of current groups
 * @bsize:      Size of one bucket, in longs
 * @old_lt:     Pointer to the current lookup table
 * @new_lt:     Pointer to the new, pre-allocated lookup table
 *
 * Each bucket with index b in the new lookup table, belonging to group g, is
 * filled with the bit union of:
 * - all the buckets with index such that the upper four bits of the lower byte
 *   equal b, from group g, with g odd
 * - all the buckets with index such that the lower four bits equal b, from
 *   group g, with g even
 *
 * That is, given buckets from the new lookup table N(x, y) and the old lookup
 * table O(x, y), with x bucket index, and y group index:
 *
 * - with g odd:  N(b, g) := U(O(x, g) for each x : x = (b & 0xf0) >> 4)
 * - with g even: N(b, g) := U(O(x, g) for each x : x = b & 0x0f)
 *
 * where U() denotes the arbitrary union operation (binary OR of n terms). This
 * ensures equivalence of the matching results on lookup.
 */
static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
                               unsigned long *old_lt, unsigned long *new_lt)
{
        int g, b, bsrc, i;

        memset(new_lt, 0, old_groups * 2 * NFT_PIPAPO_BUCKETS(4) * bsize *
                          sizeof(unsigned long));

        for (g = 0; g < old_groups * 2; g += 2) {
                int src_g = g / 2;

                for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
                        for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
                             bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
                             bsrc++) {
                                if (((bsrc & 0xf0) >> 4) != b)
                                        continue;

                                for (i = 0; i < bsize; i++)
                                        new_lt[i] |= old_lt[bsrc * bsize + i];
                        }

                        new_lt += bsize;
                }

                for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
                        for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
                             bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
                             bsrc++) {
                                if ((bsrc & 0x0f) != b)
                                        continue;

                                for (i = 0; i < bsize; i++)
                                        new_lt[i] |= old_lt[bsrc * bsize + i];
                        }

                        new_lt += bsize;
                }
        }
}
/**
 * pipapo_lt_bits_adjust() - Adjust group size for lookup table if needed
 * @f:  Field containing lookup table
 */
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
        unsigned int groups, bb;
        unsigned long *new_lt;
        size_t lt_size;

        lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
                  sizeof(*f->lt);

        if (f->bb == NFT_PIPAPO_GROUP_BITS_SMALL_SET &&
            lt_size > NFT_PIPAPO_LT_SIZE_HIGH) {
                groups = f->groups * 2;
                bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;

                lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
                          sizeof(*f->lt);
        } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
                   lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
                groups = f->groups / 2;
                bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;

                lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
                          sizeof(*f->lt);

                /* Don't increase group width if the resulting lookup table size
                 * would exceed the upper size threshold for a "small" set.
                 */
                if (lt_size > NFT_PIPAPO_LT_SIZE_HIGH)
                        return;
        } else {
                return;
        }

        new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
        if (!new_lt)
                return;

        NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
        if (f->bb == 4 && bb == 8) {
                pipapo_lt_4b_to_8b(f->groups, f->bsize,
                                   NFT_PIPAPO_LT_ALIGN(f->lt),
                                   NFT_PIPAPO_LT_ALIGN(new_lt));
        } else if (f->bb == 8 && bb == 4) {
                pipapo_lt_8b_to_4b(f->groups, f->bsize,
                                   NFT_PIPAPO_LT_ALIGN(f->lt),
                                   NFT_PIPAPO_LT_ALIGN(new_lt));
        } else {
                BUG();
        }

        f->groups = groups;
        f->bb = bb;
        kvfree(f->lt);
        f->lt = new_lt;
}
/**
 * pipapo_insert() - Insert new rule in field given input key and mask length
 * @f:          Field containing lookup table
 * @k:          Input key for classification, without nftables padding
 * @mask_bits:  Length of mask; matches field length for non-ranged entry
 *
 * Insert a new rule reference in lookup buckets corresponding to k and
 * mask_bits.
 *
 * Return: 1 on success (one rule inserted), negative error code on failure.
 */
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
                         int mask_bits)
{
        unsigned int rule = f->rules, group, ret, bit_offset = 0;

        ret = pipapo_resize(f, f->rules, f->rules + 1);
        if (ret)
                return ret;

        f->rules++;

        for (group = 0; group < f->groups; group++) {
                int i, v;
                u8 mask;

                v = k[group / (BITS_PER_BYTE / f->bb)];
                v &= GENMASK(BITS_PER_BYTE - bit_offset - 1, 0);
                v >>= (BITS_PER_BYTE - bit_offset) - f->bb;

                bit_offset += f->bb;
                bit_offset %= BITS_PER_BYTE;

                if (mask_bits >= (group + 1) * f->bb) {
                        /* Not masked */
                        pipapo_bucket_set(f, rule, group, v);
                } else if (mask_bits <= group * f->bb) {
                        /* Completely masked */
                        for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++)
                                pipapo_bucket_set(f, rule, group, i);
                } else {
                        /* The mask limit falls on this group */
                        mask = GENMASK(f->bb - 1, 0);
                        mask >>= mask_bits - group * f->bb;
                        for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++) {
                                if ((i & ~mask) == (v & ~mask))
                                        pipapo_bucket_set(f, rule, group, i);
                        }
                }
        }

        pipapo_lt_bits_adjust(f);

        return 1;
}
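
/* Example, not from the sources: inserting 192.168.0.0/22 with 4-bit groups
 * (f->bb == 4) gives mask_bits = 22, which falls inside group 5 (bits
 * 20..23): groups 0..4 each get a single bucket set, group 5 gets buckets
 * 0x0..0x3 set (two trailing bits left free by the mask), and groups 6 and 7,
 * completely masked, get all 16 buckets set.
 */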
/**
 * pipapo_step_diff() - Check if setting @step bit in netmask would change it
 * @base:       Mask we are expanding
 * @step:       Step bit for given expansion step
 * @len:        Total length of mask space (set and unset bits), bytes
 *
 * Convenience function for mask expansion.
 *
 * Return: true if step bit changes mask (i.e. isn't set), false otherwise.
 */
static bool pipapo_step_diff(u8 *base, int step, int len)
{
        /* Network order, byte-addressed */
#ifdef __BIG_ENDIAN__
        return !(BIT(step % BITS_PER_BYTE) & base[step / BITS_PER_BYTE]);
#else
        return !(BIT(step % BITS_PER_BYTE) &
                 base[len - 1 - step / BITS_PER_BYTE]);
#endif
}
/**
 * pipapo_step_after_end() - Check if mask exceeds range end with given step
 * @base:       Mask we are expanding
 * @end:        End of range
 * @step:       Step bit for given expansion step, highest bit to be set
 * @len:        Total length of mask space (set and unset bits), bytes
 *
 * Convenience function for mask expansion.
 *
 * Return: true if mask exceeds range setting step bits, false otherwise.
 */
static bool pipapo_step_after_end(const u8 *base, const u8 *end, int step,
                                  int len)
{
        u8 tmp[NFT_PIPAPO_MAX_BYTES];
        int i;

        memcpy(tmp, base, len);

        /* Network order, byte-addressed */
        for (i = 0; i <= step; i++)
#ifdef __BIG_ENDIAN__
                tmp[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
#else
                tmp[len - 1 - i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
#endif

        return memcmp(tmp, end, len) > 0;
}
/**
 * pipapo_base_sum() - Sum step bit to given len-sized netmask base with carry
 * @base:       Netmask base
 * @step:       Step bit to sum
 * @len:        Netmask length, bytes
 */
static void pipapo_base_sum(u8 *base, int step, int len)
{
        bool carry = false;
        int i;

        /* Network order, byte-addressed */
#ifdef __BIG_ENDIAN__
        for (i = step / BITS_PER_BYTE; i < len; i++) {
#else
        for (i = len - 1 - step / BITS_PER_BYTE; i >= 0; i--) {
#endif
                if (carry)
                        base[i]++;
                else
                        base[i] += 1 << (step % BITS_PER_BYTE);

                if (base[i])
                        break;

                carry = true;
        }
}
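
/* Example, not from the sources: on a little-endian machine, with
 * base = { 0x0a, 0xff }, step = 0 and len = 2, adding bit 0 overflows the
 * last byte: base[1] wraps to 0x00 and the carry increments base[0],
 * giving { 0x0b, 0x00 }, i.e. 0x0aff + 1 = 0x0b00 in network order.
 */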
/**
 * pipapo_expand() - Expand to composing netmasks, insert into lookup table
 * @f:          Field containing lookup table
 * @start:      Start of range
 * @end:        End of range
 * @len:        Length of value in bits
 *
 * Expand range to composing netmasks and insert corresponding rule references
 * in lookup buckets.
 *
 * Return: number of inserted rules on success, negative error code on failure.
 */
static int pipapo_expand(struct nft_pipapo_field *f,
                         const u8 *start, const u8 *end, int len)
{
        int step, masks = 0, bytes = DIV_ROUND_UP(len, BITS_PER_BYTE);
        u8 base[NFT_PIPAPO_MAX_BYTES];

        memcpy(base, start, bytes);
        while (memcmp(base, end, bytes) <= 0) {
                int err;

                step = 0;
                while (pipapo_step_diff(base, step, bytes)) {
                        if (pipapo_step_after_end(base, end, step, bytes))
                                break;

                        step++;
                        if (step >= len) {
                                if (!masks) {
                                        err = pipapo_insert(f, base, 0);
                                        if (err < 0)
                                                return err;
                                        masks = 1;
                                }
                                goto out;
                        }
                }

                err = pipapo_insert(f, base, len - step);
                if (err < 0)
                        return err;

                masks++;
                pipapo_base_sum(base, step, bytes);
        }
out:
        return masks;
}
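
/* Example, not from the sources: expanding the one-byte range 0x05 - 0x08
 * (len = 8) produces the three composing netmasks 0x05/8, 0x06/7 and 0x08/8:
 * pipapo_insert() is called once per mask and 3 is returned.
 */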
/**
 * pipapo_map() - Insert rules in mapping tables, mapping them between fields
 * @m:          Matching data, including mapping table
 * @map:        Table of rule maps: array of first rule and amount of rules
 *              in next field a given rule maps to, for each field
 * @e:          For last field, nft_set_ext pointer matching rules map to
 */
static void pipapo_map(struct nft_pipapo_match *m,
                       union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
                       struct nft_pipapo_elem *e)
{
        struct nft_pipapo_field *f;
        int i, j;

        for (i = 0, f = m->f; i < m->field_count - 1; i++, f++) {
                for (j = 0; j < map[i].n; j++) {
                        f->mt[map[i].to + j].to = map[i + 1].to;
                        f->mt[map[i].to + j].n = map[i + 1].n;
                }
        }

        /* Last field: map to ext instead of mapping to next field */
        for (j = 0; j < map[i].n; j++)
                f->mt[map[i].to + j].e = e;
}
/**
 * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
 * @m:          Matching data
 * @cpu:        CPU number
 */
static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
{
        struct nft_pipapo_scratch *s;
        void *mem;

        s = *per_cpu_ptr(m->scratch, cpu);
        if (!s)
                return;

        mem = s;
        mem -= s->align_off;
        kfree(mem);
}
/**
 * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
 * @clone:      Copy of matching data with pending insertions and deletions
 * @bsize_max:  Maximum bucket size, scratch maps cover two buckets
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
                                  unsigned long bsize_max)
{
        int i;

        for_each_possible_cpu(i) {
                struct nft_pipapo_scratch *scratch;
#ifdef NFT_PIPAPO_ALIGN
                void *scratch_aligned;
                u32 align_off;
#endif
                scratch = kzalloc_node(struct_size(scratch, map,
                                                   bsize_max * 2) +
                                       NFT_PIPAPO_ALIGN_HEADROOM,
                                       GFP_KERNEL_ACCOUNT, cpu_to_node(i));
                if (!scratch) {
                        /* On failure, there's no need to undo previous
                         * allocations: this means that some scratch maps have
                         * a bigger allocated size now (this is only called on
                         * insertion), but the extra space won't be used by any
                         * CPU as new elements are not inserted and m->bsize_max
                         * is not updated.
                         */
                        return -ENOMEM;
                }

                pipapo_free_scratch(clone, i);

#ifdef NFT_PIPAPO_ALIGN
                /* Align &scratch->map (not the struct itself): the extra
                 * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
                 * above guarantee we can waste up to those bytes in order
                 * to align the map field regardless of its offset within
                 * the struct.
                 */
                BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);

                scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
                scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
                align_off = scratch_aligned - (void *)scratch;

                scratch = scratch_aligned;
                scratch->align_off = align_off;
#endif
                *per_cpu_ptr(clone->scratch, i) = scratch;
        }

        return 0;
}
static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
{
#ifdef CONFIG_PROVE_LOCKING
        const struct net *net = read_pnet(&set->net);

        return lockdep_is_held(&nft_pernet(net)->commit_mutex);
#else
        return true;
#endif
}
static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old);
/**
 * pipapo_maybe_clone() - Build clone for pending data changes, if not existing
 * @set:        nftables API set representation
 *
 * Return: newly created or existing clone, if any. NULL on allocation failure.
 */
static struct nft_pipapo_match *pipapo_maybe_clone(const struct nft_set *set)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m;

        if (priv->clone)
                return priv->clone;

        m = rcu_dereference_protected(priv->match,
                                      nft_pipapo_transaction_mutex_held(set));
        priv->clone = pipapo_clone(m);

        return priv->clone;
}
/**
 * nft_pipapo_insert() - Validate and insert ranged elements
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem:       nftables API element representation containing key data
 * @elem_priv:  Filled with pointer to &struct nft_set_ext in inserted element
 *
 * Return: 0 on success, negative error code on failure.
 */
static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
                             struct nft_elem_priv **elem_priv)
{
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
        const u8 *start = (const u8 *)elem->key.val.data, *end;
        struct nft_pipapo_match *m = pipapo_maybe_clone(set);
        u8 genmask = nft_genmask_next(net);
        struct nft_pipapo_elem *e, *dup;
        u64 tstamp = nft_net_tstamp(net);
        struct nft_pipapo_field *f;
        const u8 *start_p, *end_p;
        int i, bsize_max, err = 0;

        if (!m)
                return -ENOMEM;

        if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
                end = (const u8 *)nft_set_ext_key_end(ext)->data;
        else
                end = start;

        dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
        if (!IS_ERR(dup)) {
                /* Check if we already have the same exact entry */
                const struct nft_data *dup_key, *dup_end;

                dup_key = nft_set_ext_key(&dup->ext);
                if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
                        dup_end = nft_set_ext_key_end(&dup->ext);
                else
                        dup_end = dup_key;

                if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
                    !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
                        *elem_priv = &dup->priv;
                        return -EEXIST;
                }

                return -ENOTEMPTY;
        }

        if (PTR_ERR(dup) == -ENOENT) {
                /* Look for partially overlapping entries */
                dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
                                 GFP_KERNEL);
        }

        if (PTR_ERR(dup) != -ENOENT) {
                if (IS_ERR(dup))
                        return PTR_ERR(dup);
                *elem_priv = &dup->priv;
                return -ENOTEMPTY;
        }

        /* some helpers return -1, or >= 0 for a valid rule pos,
         * so we cannot support more than INT_MAX rules at this time.
         */
        BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);

        start_p = start;
        end_p = end;
        nft_pipapo_for_each_field(f, i, m) {
                if (f->rules >= NFT_PIPAPO_RULE0_MAX)
                        return -ENOSPC;

                if (memcmp(start_p, end_p,
                           f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
                        return -EINVAL;

                start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
                end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
        }

        bsize_max = m->bsize_max;

        nft_pipapo_for_each_field(f, i, m) {
                int ret;

                rulemap[i].to = f->rules;

                ret = memcmp(start, end,
                             f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
                if (!ret)
                        ret = pipapo_insert(f, start, f->groups * f->bb);
                else
                        ret = pipapo_expand(f, start, end, f->groups * f->bb);

                if (ret < 0)
                        return ret;

                if (f->bsize > bsize_max)
                        bsize_max = f->bsize;

                rulemap[i].n = ret;

                start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
                end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
        }

        if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
                put_cpu_ptr(m->scratch);

                err = pipapo_realloc_scratch(m, bsize_max);
                if (err)
                        return err;

                m->bsize_max = bsize_max;
        } else {
                put_cpu_ptr(m->scratch);
        }

        e = nft_elem_priv_cast(elem->priv);
        *elem_priv = &e->priv;

        pipapo_map(m, rulemap, e);

        return 0;
}
/**
 * pipapo_clone() - Clone matching data to create new working copy
 * @old:        Existing matching data
 *
 * Return: copy of matching data passed as 'old' or NULL.
 */
static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
{
        struct nft_pipapo_field *dst, *src;
        struct nft_pipapo_match *new;
        int i;

        new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT);
        if (!new)
                return NULL;

        new->field_count = old->field_count;
        new->bsize_max = old->bsize_max;

        new->scratch = alloc_percpu(*new->scratch);
        if (!new->scratch)
                goto out_scratch;

        for_each_possible_cpu(i)
                *per_cpu_ptr(new->scratch, i) = NULL;

        if (pipapo_realloc_scratch(new, old->bsize_max))
                goto out_scratch_realloc;

        rcu_head_init(&new->rcu);

        src = old->f;
        dst = new->f;

        for (i = 0; i < old->field_count; i++) {
                unsigned long *new_lt;

                memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));

                new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
                                  src->bsize * sizeof(*dst->lt) +
                                  NFT_PIPAPO_ALIGN_HEADROOM,
                                  GFP_KERNEL_ACCOUNT);
                if (!new_lt)
                        goto out_lt;

                dst->lt = new_lt;

                memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
                       NFT_PIPAPO_LT_ALIGN(src->lt),
                       src->bsize * sizeof(*dst->lt) *
                       src->groups * NFT_PIPAPO_BUCKETS(src->bb));

                if (src->rules > 0) {
                        dst->mt = kvmalloc_array(src->rules_alloc,
                                                 sizeof(*src->mt),
                                                 GFP_KERNEL_ACCOUNT);
                        if (!dst->mt)
                                goto out_mt;

                        memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
                } else {
                        dst->mt = NULL;
                        dst->rules_alloc = 0;
                }

                src++;
                dst++;
        }

        return new;

out_mt:
        kvfree(dst->lt);
out_lt:
        for (dst--; i > 0; i--) {
                kvfree(dst->mt);
                kvfree(dst->lt);
                dst--;
        }
out_scratch_realloc:
        for_each_possible_cpu(i)
                pipapo_free_scratch(new, i);
out_scratch:
        free_percpu(new->scratch);
        kfree(new);

        return NULL;
}
/**
 * pipapo_rules_same_key() - Get number of rules originated from the same entry
 * @f:          Field containing mapping table
 * @first:      Index of first rule in set of rules mapping to same entry
 *
 * Using the fact that all rules in a field that originated from the same entry
 * will map to the same set of rules in the next field, or to the same element
 * reference, return the cardinality of the set of rules that originated from
 * the same entry as the rule with index @first, @first rule included.
 *
 * In pictures:
 *
 * ::
 *
 *                     rules
 *      field #0       0    1    2    3    4
 *         map to:     0    1   2-4  2-4  5-9
 *
 *      in field #1    0    1    2    3    4    5 ...
 *
 * if this is called for rule 2 on field #0, it will return 3, as also rules 2
 * and 3 in field 0 map to the same set of rules (2, 3, 4) in the next field.
 *
 * For the last field in a set, we can rely on associated entries to map to the
 * same element references.
 *
 * Return: Number of rules that originated from the same entry as @first.
 */
static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
{
        struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
        unsigned int r;

        for (r = first; r < f->rules; r++) {
                if (r != first && e != f->mt[r].e)
                        return r - first;

                e = f->mt[r].e;
        }

        return r - first;
}
/**
 * pipapo_unmap() - Remove rules from mapping tables, renumber remaining ones
 * @mt:         Mapping array
 * @rules:      Original amount of rules in mapping table
 * @start:      First rule index to be removed
 * @n:          Amount of rules to be removed
 * @to_offset:  First rule index, in next field, this group of rules maps to
 * @is_last:    If this is the last field, delete reference from mapping array
 *
 * This is used to unmap rules from the mapping table for a single field,
 * maintaining consistency and compactness for the existing ones.
 *
 * In pictures: let's assume that we want to delete rules 2 and 3 from the
 * following mapping array:
 *
 * ::
 *
 *                 rules
 *                 0      1      2      3      4
 *        map to:  4-10   4-10   11-15  11-15  16-18
 *
 * the result will be:
 *
 * ::
 *
 *                 rules
 *                 0      1      2
 *        map to:  4-10   4-10   11-13
 *
 * for fields before the last one. In case this is the mapping table for the
 * last field in a set, and rules map to pointers to &struct nft_pipapo_elem:
 *
 * ::
 *
 *                          rules
 *                          0      1      2      3      4
 *      element pointers:   0x42   0x42   0x33   0x33   0x44
 *
 * the result will be:
 *
 * ::
 *
 *                          rules
 *                          0      1      2
 *      element pointers:   0x42   0x42   0x44
 */
static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
                         unsigned int start, unsigned int n,
                         unsigned int to_offset, bool is_last)
{
        int i;

        memmove(mt + start, mt + start + n, (rules - start - n) * sizeof(*mt));
        memset(mt + rules - n, 0, n * sizeof(*mt));

        if (is_last)
                return;

        for (i = start; i < rules - n; i++)
                mt[i].to -= to_offset;
}
/**
 * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
 * @m:          Matching data
 * @rulemap:    Table of rule maps, arrays of first rule and amount of rules
 *              in next field a given entry maps to, for each field
 *
 * For each rule in lookup table buckets mapping to this set of rules, drop
 * all bits set in lookup table mapping. In pictures, assuming we want to drop
 * rules 0 and 1 from this lookup table:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0    0                                              1,2
 *        1   1,2                                      0
 *        2    0                                      1,2
 *        3    0                              1,2
 *        4  0,1,2
 *        5    0   1   2
 *        6  0,1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *        7   1,2 1,2   1   1   1  0,1  1   1   1   1   1   1   1   1   1   1
 *
 * rule 2 becomes rule 0, and the result will be:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0                                                    0
 *        1    0
 *        2                                            0
 *        3                                    0
 *        4    0
 *        5            0
 *        6    0
 *        7    0   0
 *
 * once this is done, call unmap() to drop all the corresponding rule references
 * from mapping tables.
 */
static void pipapo_drop(struct nft_pipapo_match *m,
                        union nft_pipapo_map_bucket rulemap[])
{
        struct nft_pipapo_field *f;
        int i;

        nft_pipapo_for_each_field(f, i, m) {
                int g;

                for (g = 0; g < f->groups; g++) {
                        unsigned long *pos;
                        int b;

                        pos = NFT_PIPAPO_LT_ALIGN(f->lt) + g *
                              NFT_PIPAPO_BUCKETS(f->bb) * f->bsize;

                        for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
                                bitmap_cut(pos, pos, rulemap[i].to,
                                           rulemap[i].n,
                                           f->bsize * BITS_PER_LONG);

                                pos += f->bsize;
                        }
                }

                pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
                             rulemap[i + 1].n, i == m->field_count - 1);
                if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
                        /* We can ignore this, a failure to shrink tables down
                         * doesn't make tables invalid.
                         */
                        ;
                }
                f->rules -= rulemap[i].n;

                pipapo_lt_bits_adjust(f);
        }
}
static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
                                     struct nft_pipapo_elem *e)
{
        nft_setelem_data_deactivate(net, set, &e->priv);
}
/**
 * pipapo_gc() - Drop expired entries from set, destroy start and end elements
 * @set:        nftables API set representation
 * @m:          Matching data
 */
static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct net *net = read_pnet(&set->net);
        unsigned int rules_f0, first_rule = 0;
        u64 tstamp = nft_net_tstamp(net);
        struct nft_pipapo_elem *e;
        struct nft_trans_gc *gc;

        gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
        if (!gc)
                return;

        while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
                union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
                const struct nft_pipapo_field *f;
                unsigned int i, start, rules_fx;

                start = first_rule;
                rules_fx = rules_f0;

                nft_pipapo_for_each_field(f, i, m) {
                        rulemap[i].to = start;
                        rulemap[i].n = rules_fx;

                        if (i < m->field_count - 1) {
                                rules_fx = f->mt[start].n;
                                start = f->mt[start].to;
                        }
                }

                /* Pick the last field, and its last index */
                f--;
                i--;
                e = f->mt[rulemap[i].to].e;

                /* synchronous gc never fails, there is no need to set on
                 * NFT_SET_ELEM_DEAD_BIT.
                 */
                if (__nft_set_elem_expired(&e->ext, tstamp)) {
                        gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
                        if (!gc)
                                return;

                        nft_pipapo_gc_deactivate(net, set, e);
                        pipapo_drop(m, rulemap);
                        nft_trans_gc_elem_add(gc, e);

                        /* And check again current first rule, which is now the
                         * first we haven't checked.
                         */
                } else {
                        first_rule += rules_f0;
                }
        }

        gc = nft_trans_gc_catchall_sync(gc);
        if (gc) {
                nft_trans_gc_queue_sync_done(gc);
                priv->last_gc = jiffies;
        }
}
/**
 * pipapo_free_fields() - Free per-field tables contained in matching data
 * @m:          Matching data
 */
static void pipapo_free_fields(struct nft_pipapo_match *m)
{
        struct nft_pipapo_field *f;
        int i;

        nft_pipapo_for_each_field(f, i, m) {
                kvfree(f->lt);
                kvfree(f->mt);
        }
}
static void pipapo_free_match(struct nft_pipapo_match *m)
{
        int i;

        for_each_possible_cpu(i)
                pipapo_free_scratch(m, i);

        free_percpu(m->scratch);
        pipapo_free_fields(m);

        kfree(m);
}
/**
 * pipapo_reclaim_match - RCU callback to free fields from old matching data
 * @rcu:        RCU head
 */
static void pipapo_reclaim_match(struct rcu_head *rcu)
{
        struct nft_pipapo_match *m;

        m = container_of(rcu, struct nft_pipapo_match, rcu);
        pipapo_free_match(m);
}
/**
 * nft_pipapo_commit() - Replace lookup data with current working copy
 * @set:        nftables API set representation
 *
 * While at it, check if we should perform garbage collection on the working
 * copy before committing it for lookup, and don't replace the table if the
 * working copy doesn't have pending changes.
 *
 * We also need to create a new working copy for subsequent insertions and
 * deletions.
 */
static void nft_pipapo_commit(struct nft_set *set)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *old;

        if (!priv->clone)
                return;

        if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
                pipapo_gc(set, priv->clone);

        old = rcu_replace_pointer(priv->match, priv->clone,
                                  nft_pipapo_transaction_mutex_held(set));
        priv->clone = NULL;

        if (old)
                call_rcu(&old->rcu, pipapo_reclaim_match);
}
static void nft_pipapo_abort(const struct nft_set *set)
{
        struct nft_pipapo *priv = nft_set_priv(set);

        if (!priv->clone)
                return;
        pipapo_free_match(priv->clone);
        priv->clone = NULL;
}
/**
 * nft_pipapo_activate() - Mark element reference as active given key, commit
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem_priv:  nftables API element representation containing key data
 *
 * On insertion, elements are added to a copy of the matching data currently
 * in use for lookups, and not directly inserted into current lookup data. Both
 * nft_pipapo_insert() and nft_pipapo_activate() are called once for each
 * element, hence we can't purpose either one as a real commit operation.
 */
static void nft_pipapo_activate(const struct net *net,
                                const struct nft_set *set,
                                struct nft_elem_priv *elem_priv)
{
        struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);

        nft_clear(net, &e->ext);
}
/**
 * nft_pipapo_deactivate() - Search for element and make it inactive
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem:       nftables API element representation containing key data
 *
 * Return: deactivated element if found, NULL otherwise.
 */
static struct nft_elem_priv *
nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
                      const struct nft_set_elem *elem)
{
        struct nft_pipapo_match *m = pipapo_maybe_clone(set);
        struct nft_pipapo_elem *e;

        /* removal must occur on priv->clone, if we are low on memory
         * we have no choice and must fail the removal request.
         */
        if (!m)
                return NULL;

        e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
                       nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
        if (IS_ERR(e))
                return NULL;

        nft_set_elem_change_active(net, set, &e->ext);

        return &e->priv;
}
/**
 * nft_pipapo_flush() - Make element inactive
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem_priv:  nftables API element representation containing key data
 *
 * This is functionally the same as nft_pipapo_deactivate(), with a slightly
 * different interface, and it's also called once for each element in a set
 * being flushed, so we can't implement, strictly speaking, a flush operation,
 * which would otherwise be as simple as allocating an empty copy of the
 * matching data.
 *
 * Note that we could in theory do that, mark the set as flushed, and ignore
 * subsequent calls, but we would leak all the elements after the first one,
 * because they wouldn't then be freed as result of API calls.
 */
static void nft_pipapo_flush(const struct net *net, const struct nft_set *set,
                             struct nft_elem_priv *elem_priv)
{
        struct nft_pipapo_elem *e = nft_elem_priv_cast(elem_priv);

        nft_set_elem_change_active(net, set, &e->ext);
}
/**
 * pipapo_get_boundaries() - Get byte interval for associated rules
 * @f:          Field including lookup table
 * @first_rule: First rule (lowest index)
 * @rule_count: Number of associated rules
 * @left:       Byte expression for left boundary (start of range)
 * @right:      Byte expression for right boundary (end of range)
 *
 * Given the first rule and amount of rules that originated from the same entry,
 * build the original range associated with the entry, and calculate the length
 * of the originating netmask.
 *
 * In pictures:
 *
 * ::
 *
 *                     bucket
 *      group  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
 *        0                                                   1,2
 *        1   1,2
 *        2                                           1,2
 *        3                                   1,2
 *        4   1,2
 *        5        1   2
 *        6   1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *        7   1,2 1,2  1   1   1   1   1   1   1   1   1   1   1   1   1   1
 *
 * this is the lookup table corresponding to the IPv4 range
 * 192.168.1.0-192.168.2.1, which was expanded to the two composing netmasks,
 * rule #1: 192.168.1.0/24, and rule #2: 192.168.2.0/31.
 *
 * This function fills @left and @right with the byte values of the leftmost
 * and rightmost bucket indices for the lowest and highest rule indices,
 * respectively. If @first_rule is 1 and @rule_count is 2, we obtain, in
 * nibbles:
 *   left:  < 12, 0, 10, 8, 0, 1, 0, 0 >
 *   right: < 12, 0, 10, 8, 0, 2, 2, 1 >
 * corresponding to bytes:
 *   left:  < 192, 168, 1, 0 >
 *   right: < 192, 168, 2, 1 >
 * with mask length irrelevant here, unused on return, as the range is already
 * defined by its start and end points. The mask length is relevant for a single
 * ranged entry instead: if @first_rule is 1 and @rule_count is 1, we ignore
 * rule 2 above: @left becomes < 192, 168, 1, 0 >, @right becomes
 * < 192, 168, 1, 255 >, and the mask length, calculated from the distances
 * between leftmost and rightmost bucket indices for each group, would be 24.
 *
 * Return: mask length, in bits.
 */
static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
                                 int rule_count, u8 *left, u8 *right)
{
        int g, mask_len = 0, bit_offset = 0;
        u8 *l = left, *r = right;

        for (g = 0; g < f->groups; g++) {
                int b, x0, x1;

                x0 = -1;
                x1 = -1;
                for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
                        unsigned long *pos;

                        pos = NFT_PIPAPO_LT_ALIGN(f->lt) +
                              (g * NFT_PIPAPO_BUCKETS(f->bb) + b) * f->bsize;
                        if (test_bit(first_rule, pos) && x0 == -1)
                                x0 = b;
                        if (test_bit(first_rule + rule_count - 1, pos))
                                x1 = b;
                }

                *l |= x0 << (BITS_PER_BYTE - f->bb - bit_offset);
                *r |= x1 << (BITS_PER_BYTE - f->bb - bit_offset);

                bit_offset += f->bb;
                if (bit_offset >= BITS_PER_BYTE) {
                        bit_offset %= BITS_PER_BYTE;
                        l++;
                        r++;
                }

                if (x1 - x0 == 0)
                        mask_len += 4;
                else if (x1 - x0 == 1)
                        mask_len += 3;
                else if (x1 - x0 == 3)
                        mask_len += 2;
                else if (x1 - x0 == 7)
                        mask_len += 1;
        }

        return mask_len;
}
/**
 * pipapo_match_field() - Match rules against byte ranges
 * @f:          Field including the lookup table
 * @first_rule: First of associated rules originating from same entry
 * @rule_count: Amount of associated rules
 * @start:      Start of range to be matched
 * @end:        End of range to be matched
 *
 * Return: true on match, false otherwise.
 */
static bool pipapo_match_field(struct nft_pipapo_field *f,
                               int first_rule, int rule_count,
                               const u8 *start, const u8 *end)
{
        u8 right[NFT_PIPAPO_MAX_BYTES] = { 0 };
        u8 left[NFT_PIPAPO_MAX_BYTES] = { 0 };

        pipapo_get_boundaries(f, first_rule, rule_count, left, right);

        return !memcmp(start, left,
                       f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) &&
               !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
}
/**
 * nft_pipapo_remove() - Remove element given key, commit
 * @net:        Network namespace
 * @set:        nftables API set representation
 * @elem_priv:  nftables API element representation containing key data
 *
 * Similarly to nft_pipapo_activate(), this is used as commit operation by the
 * API, but it's called once per element in the pending transaction, so we can't
 * implement this as a single commit operation. Closest we can get is to remove
 * the matched element here, if any, and commit the updated matching data.
 */
static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
                              struct nft_elem_priv *elem_priv)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m = priv->clone;
        unsigned int rules_f0, first_rule = 0;
        struct nft_pipapo_elem *e;
        const u8 *data;

        e = nft_elem_priv_cast(elem_priv);
        data = (const u8 *)nft_set_ext_key(&e->ext);

        while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
                union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
                const u8 *match_start, *match_end;
                struct nft_pipapo_field *f;
                int i, start, rules_fx;

                match_start = data;

                if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END))
                        match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data;
                else
                        match_end = data;

                start = first_rule;
                rules_fx = rules_f0;

                nft_pipapo_for_each_field(f, i, m) {
                        bool last = i == m->field_count - 1;

                        if (!pipapo_match_field(f, start, rules_fx,
                                                match_start, match_end))
                                break;

                        rulemap[i].to = start;
                        rulemap[i].n = rules_fx;

                        rules_fx = f->mt[start].n;
                        start = f->mt[start].to;

                        match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
                        match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);

                        if (last && f->mt[rulemap[i].to].e == e) {
                                pipapo_drop(m, rulemap);
                                return;
                        }
                }

                first_rule += rules_f0;
        }

        WARN_ON_ONCE(1); /* elem_priv not found */
}
/**
 * nft_pipapo_do_walk() - Walk over elements in m
 * @ctx:        nftables API context
 * @set:        nftables API set representation
 * @m:          matching data pointing to key mapping array
 * @iter:       Iterator
 *
 * As elements are referenced in the mapping array for the last field, directly
 * scan that array: there's no need to follow rule mappings from the first
 * field. @m is protected either by RCU read lock or by transaction mutex.
 */
static void nft_pipapo_do_walk(const struct nft_ctx *ctx, struct nft_set *set,
                               const struct nft_pipapo_match *m,
                               struct nft_set_iter *iter)
{
        const struct nft_pipapo_field *f;
        unsigned int i, r;

        for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
                ;

        for (r = 0; r < f->rules; r++) {
                struct nft_pipapo_elem *e;

                if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
                        continue;

                if (iter->count < iter->skip)
                        goto cont;

                e = f->mt[r].e;

                iter->err = iter->fn(ctx, set, iter, &e->priv);
                if (iter->err < 0)
                        return;

cont:
                iter->count++;
        }
}
/**
 * nft_pipapo_walk() - Walk over elements
 * @ctx:        nftables API context
 * @set:        nftables API set representation
 * @iter:       Iterator
 *
 * Test if destructive action is needed or not, clone active backend if needed
 * and call the real function to work on the data.
 */
static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
                            struct nft_set_iter *iter)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        const struct nft_pipapo_match *m;

        switch (iter->type) {
        case NFT_ITER_UPDATE:
                m = pipapo_maybe_clone(set);
                if (!m) {
                        iter->err = -ENOMEM;
                        return;
                }

                nft_pipapo_do_walk(ctx, set, m, iter);
                break;
        case NFT_ITER_READ:
                rcu_read_lock();
                m = rcu_dereference(priv->match);
                nft_pipapo_do_walk(ctx, set, m, iter);
                rcu_read_unlock();
                break;
        default:
                iter->err = -EINVAL;
                WARN_ON_ONCE(1);
                break;
        }
}
/**
 * nft_pipapo_privsize() - Return the size of private data for the set
 * @nla:        netlink attributes, ignored as size doesn't depend on them
 * @desc:       Set description, ignored as size doesn't depend on it
 *
 * Return: size of private data for this set implementation, in bytes
 */
static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
                               const struct nft_set_desc *desc)
{
        return sizeof(struct nft_pipapo);
}
/**
 * nft_pipapo_estimate() - Set size, space and lookup complexity
 * @desc:       Set description, element count and field description used
 * @features:   Flags: NFT_SET_INTERVAL needs to be there
 * @est:        Storage for estimation data
 *
 * Return: true if set description is compatible, false otherwise
 */
static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        if (!(features & NFT_SET_INTERVAL) ||
            desc->field_count < NFT_PIPAPO_MIN_FIELDS)
                return false;

        est->size = pipapo_estimate_size(desc);
        if (!est->size)
                return false;

        est->lookup = NFT_SET_CLASS_O_LOG_N;

        est->space = NFT_SET_CLASS_O_N;

        return true;
}
/**
 * nft_pipapo_init() - Initialise data for a set instance
 * @set:        nftables API set representation
 * @desc:       Set description
 * @nla:        netlink attributes
 *
 * Validate number and size of fields passed as NFTA_SET_DESC_CONCAT netlink
 * attributes, initialise internal set parameters, current instance of matching
 * data and a copy for subsequent insertions.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int nft_pipapo_init(const struct nft_set *set,
                           const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m;
        struct nft_pipapo_field *f;
        int err, i, field_count;

        BUILD_BUG_ON(offsetof(struct nft_pipapo_elem, priv) != 0);

        field_count = desc->field_count ? : 1;

        BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
        BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);

        if (field_count > NFT_PIPAPO_MAX_FIELDS)
                return -EINVAL;

        m = kmalloc(struct_size(m, f, field_count), GFP_KERNEL);
        if (!m)
                return -ENOMEM;

        m->field_count = field_count;
        m->bsize_max = 0;

        m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
        if (!m->scratch) {
                err = -ENOMEM;
                goto out_scratch;
        }
        for_each_possible_cpu(i)
                *per_cpu_ptr(m->scratch, i) = NULL;

        rcu_head_init(&m->rcu);

        nft_pipapo_for_each_field(f, i, m) {
                unsigned int len = desc->field_len[i] ? : set->klen;

                /* f->groups is u8 */
                BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
                              BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);

                f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
                f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);

                priv->width += round_up(len, sizeof(u32));

                f->bsize = 0;
                f->rules = 0;
                f->rules_alloc = 0;
                f->lt = NULL;
                f->mt = NULL;
        }

        rcu_assign_pointer(priv->match, m);

        return 0;

out_scratch:
        kfree(m);

        return err;
}
/**
 * nft_set_pipapo_match_destroy() - Destroy elements from key mapping array
 * @ctx:        nftables API context
 * @set:        nftables API set representation
 * @m:          matching data pointing to key mapping array
 */
static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
                                         const struct nft_set *set,
                                         struct nft_pipapo_match *m)
{
        struct nft_pipapo_field *f;
        unsigned int i, r;

        for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
                ;

        for (r = 0; r < f->rules; r++) {
                struct nft_pipapo_elem *e;

                if (r < f->rules - 1 && f->mt[r + 1].e == f->mt[r].e)
                        continue;

                e = f->mt[r].e;

                nf_tables_set_elem_destroy(ctx, set, &e->priv);
        }
}
/**
 * nft_pipapo_destroy() - Free private data for set and all committed elements
 * @ctx:        nftables API context
 * @set:        nftables API set representation
 */
static void nft_pipapo_destroy(const struct nft_ctx *ctx,
                               const struct nft_set *set)
{
        struct nft_pipapo *priv = nft_set_priv(set);
        struct nft_pipapo_match *m;

        m = rcu_dereference_protected(priv->match, true);

        if (priv->clone) {
                nft_set_pipapo_match_destroy(ctx, set, priv->clone);
                pipapo_free_match(priv->clone);
                priv->clone = NULL;
        } else {
                nft_set_pipapo_match_destroy(ctx, set, m);
        }

        pipapo_free_match(m);
}
/**
 * nft_pipapo_gc_init() - Initialise garbage collection
 * @set:        nftables API set representation
 *
 * Instead of actually setting up a periodic work for garbage collection, as
 * this operation requires a swap of matching data with the working copy, we'll
 * do that opportunistically with other commit operations if the interval is
 * elapsed, so we just need to set the current jiffies timestamp here.
 */
static void nft_pipapo_gc_init(const struct nft_set *set)
{
        struct nft_pipapo *priv = nft_set_priv(set);

        priv->last_gc = jiffies;
}
const struct nft_set_type nft_set_pipapo_type = {
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
                          NFT_SET_TIMEOUT,
        .ops            = {
                .lookup         = nft_pipapo_lookup,
                .insert         = nft_pipapo_insert,
                .activate       = nft_pipapo_activate,
                .deactivate     = nft_pipapo_deactivate,
                .flush          = nft_pipapo_flush,
                .remove         = nft_pipapo_remove,
                .walk           = nft_pipapo_walk,
                .get            = nft_pipapo_get,
                .privsize       = nft_pipapo_privsize,
                .estimate       = nft_pipapo_estimate,
                .init           = nft_pipapo_init,
                .destroy        = nft_pipapo_destroy,
                .gc_init        = nft_pipapo_gc_init,
                .commit         = nft_pipapo_commit,
                .abort          = nft_pipapo_abort,
                .elemsize       = offsetof(struct nft_pipapo_elem, ext),
        },
};
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
const struct nft_set_type nft_set_pipapo_avx2_type = {
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
                          NFT_SET_TIMEOUT,
        .ops            = {
                .lookup         = nft_pipapo_avx2_lookup,
                .insert         = nft_pipapo_insert,
                .activate       = nft_pipapo_activate,
                .deactivate     = nft_pipapo_deactivate,
                .flush          = nft_pipapo_flush,
                .remove         = nft_pipapo_remove,
                .walk           = nft_pipapo_walk,
                .get            = nft_pipapo_get,
                .privsize       = nft_pipapo_privsize,
                .estimate       = nft_pipapo_avx2_estimate,
                .init           = nft_pipapo_init,
                .destroy        = nft_pipapo_destroy,
                .gc_init        = nft_pipapo_gc_init,
                .commit         = nft_pipapo_commit,
                .abort          = nft_pipapo_abort,
                .elemsize       = offsetof(struct nft_pipapo_elem, ext),
        },
};
#endif