net/netfilter/nft_hash.c

/*
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
#include <linux/workqueue.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>

/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_HASH_ELEMENT_HINT 3
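/* rhashtable sizes its initial table from this hint (roughly hint * 4 / 3,
 * rounded up to a power of two), so a hint of 3 yields the 4-bucket table
 * targeted above.
 */
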
struct nft_hash {
        struct rhashtable               ht;
        struct delayed_work             gc_work;
};

struct nft_hash_elem {
        struct rhash_head               node;
        struct nft_set_ext              ext;
};

struct nft_hash_cmp_arg {
        const struct nft_set            *set;
        const u32                       *key;
        u8                              genmask;
};

static const struct rhashtable_params nft_hash_params;

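/* rhashtable callbacks: the lookup key and the stored element are hashed
 * with the same jhash() over the set key so that both resolve to the same
 * bucket; nft_hash_cmp() additionally filters out expired and inactive
 * elements.
 */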
static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_cmp_arg *arg = data;

        return jhash(arg->key, len, seed);
}

static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_elem *he = data;

        return jhash(nft_set_ext_key(&he->ext), len, seed);
}

static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
                               const void *ptr)
{
        const struct nft_hash_cmp_arg *x = arg->key;
        const struct nft_hash_elem *he = ptr;

        if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
                return 1;
        if (nft_set_elem_expired(&he->ext))
                return 1;
        if (!nft_set_elem_active(&he->ext, x->genmask))
                return 1;
        return 0;
}

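/* Data path lookup: only elements that are active in the current generation
 * and not yet expired can match (enforced by nft_hash_cmp()).
 */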
static bool nft_hash_lookup(const struct nft_set *set, const u32 *key,
                            const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_cur(read_pnet(&set->pnet)),
                .set     = set,
                .key     = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL)
                *ext = &he->ext;

        return !!he;
}

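/* Runtime update path (e.g. used by the dynset expression): return a matching
 * element in any generation, or construct and insert a new one via the
 * caller's "new" constructor.
 */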
static bool nft_hash_update(struct nft_set *set, const u32 *key,
                            void *(*new)(struct nft_set *,
                                         const struct nft_expr *,
                                         struct nft_regs *regs),
                            const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
                .genmask = NFT_GENMASK_ANY,
                .set     = set,
                .key     = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL)
                goto out;

        he = new(set, expr, regs);
        if (he == NULL)
                goto err1;
        if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
                                         nft_hash_params))
                goto err2;
out:
        *ext = &he->ext;
        return true;

err2:
        nft_set_elem_destroy(set, he);
err1:
        return false;
}

static int nft_hash_insert(const struct nft_set *set,
                           const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_next(read_pnet(&set->pnet)),
                .set     = set,
                .key     = elem->key.val.data,
        };

        return rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
                                            nft_hash_params);
}

static void nft_hash_activate(const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_hash_elem *he = elem->priv;

        nft_set_elem_change_active(set, &he->ext);
        nft_set_elem_clear_busy(&he->ext);
}

static void *nft_hash_deactivate(const struct nft_set *set,
                                 const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_next(read_pnet(&set->pnet)),
                .set     = set,
                .key     = elem->key.val.data,
        };

        rcu_read_lock();
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL) {
                if (!nft_set_elem_mark_busy(&he->ext))
                        nft_set_elem_change_active(set, &he->ext);
                else
                        he = NULL;
        }
        rcu_read_unlock();

        return he;
}

static void nft_hash_remove(const struct nft_set *set,
                            const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;

        rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
}

static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int err;

        err = rhashtable_walk_init(&priv->ht, &hti);
        iter->err = err;
        if (err)
                return;

        err = rhashtable_walk_start(&hti);
        if (err && err != -EAGAIN) {
                iter->err = err;
                goto out;
        }

        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
                        err = PTR_ERR(he);
                        if (err != -EAGAIN) {
                                iter->err = err;
                                goto out;
                        }

                        continue;
                }

                if (iter->count < iter->skip)
                        goto cont;
                if (nft_set_elem_expired(&he->ext))
                        goto cont;
                if (!nft_set_elem_active(&he->ext, genmask))
                        goto cont;

                elem.priv = he;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0)
                        goto out;

cont:
                iter->count++;
        }

out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
}

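/* Garbage collection worker: walk the table and move expired, non-busy
 * elements into a GC batch for release, then re-arm the work.
 */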
static void nft_hash_gc(struct work_struct *work)
{
        struct nft_set *set;
        struct nft_hash_elem *he;
        struct nft_hash *priv;
        struct nft_set_gc_batch *gcb = NULL;
        struct rhashtable_iter hti;
        int err;

        priv = container_of(work, struct nft_hash, gc_work.work);
        set  = nft_set_container_of(priv);

        err = rhashtable_walk_init(&priv->ht, &hti);
        if (err)
                goto schedule;

        err = rhashtable_walk_start(&hti);
        if (err && err != -EAGAIN)
                goto out;

        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
                        if (PTR_ERR(he) != -EAGAIN)
                                goto out;
                        continue;
                }

                if (!nft_set_elem_expired(&he->ext))
                        continue;
                if (nft_set_elem_mark_busy(&he->ext))
                        continue;

                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (gcb == NULL)
                        goto out;
                rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, he);
        }
out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        nft_set_gc_batch_complete(gcb);
schedule:
        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
}

static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
{
        return sizeof(struct nft_hash);
}

static const struct rhashtable_params nft_hash_params = {
        .head_offset            = offsetof(struct nft_hash_elem, node),
        .hashfn                 = nft_hash_key,
        .obj_hashfn             = nft_hash_obj,
        .obj_cmpfn              = nft_hash_cmp,
        .automatic_shrinking    = true,
};

static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
{
        struct nft_hash *priv = nft_set_priv(set);
        struct rhashtable_params params = nft_hash_params;
        int err;

        params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
        params.key_len    = set->klen;

        err = rhashtable_init(&priv->ht, &params);
        if (err < 0)
                return err;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));
        return 0;
}

static void nft_hash_elem_destroy(void *ptr, void *arg)
{
        nft_set_elem_destroy((const struct nft_set *)arg, ptr);
}

static void nft_hash_destroy(const struct nft_set *set)
{
        struct nft_hash *priv = nft_set_priv(set);

        cancel_delayed_work_sync(&priv->gc_work);
        rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
                                    (void *)set);
}

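/* Memory and lookup-class estimate reported to the nf_tables core. With a
 * known set size the bucket array is sized at desc->size * 4 / 3 rounded up
 * to a power of two, e.g. 1000 elements yield roundup_pow_of_two(1333) ==
 * 2048 bucket pointers plus room for the 1000 elements themselves.
 */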
static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                              struct nft_set_estimate *est)
{
        unsigned int esize;

        esize = sizeof(struct nft_hash_elem);
        if (desc->size) {
                est->size = sizeof(struct nft_hash) +
                            roundup_pow_of_two(desc->size * 4 / 3) *
                            sizeof(struct nft_hash_elem *) +
                            desc->size * esize;
        } else {
                /* Resizing happens when the load drops below 30% or goes
                 * above 75%. The average of 52.5% load (approximated by 50%)
                 * is used for the size estimation of the hash buckets,
                 * meaning we calculate two buckets per element.
                 */
                est->size = esize + 2 * sizeof(struct nft_hash_elem *);
        }

        est->class = NFT_SET_CLASS_O_1;

        return true;
}

static struct nft_set_ops nft_hash_ops __read_mostly = {
        .privsize       = nft_hash_privsize,
        .elemsize       = offsetof(struct nft_hash_elem, ext),
        .estimate       = nft_hash_estimate,
        .init           = nft_hash_init,
        .destroy        = nft_hash_destroy,
        .insert         = nft_hash_insert,
        .activate       = nft_hash_activate,
        .deactivate     = nft_hash_deactivate,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup,
        .update         = nft_hash_update,
        .walk           = nft_hash_walk,
        .features       = NFT_SET_MAP | NFT_SET_TIMEOUT,
        .owner          = THIS_MODULE,
};

static int __init nft_hash_module_init(void)
{
        return nft_register_set(&nft_hash_ops);
}

static void __exit nft_hash_module_exit(void)
{
        nft_unregister_set(&nft_hash_ops);
}

module_init(nft_hash_module_init);
module_exit(nft_hash_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_SET();