/* net/ipv6/ila/ila_xlat.c */
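/* ILA (Identifier-Locator Addressing) translation for IPv6. Mappings from
 * 64-bit identifiers to locators are kept in a per-namespace rhashtable,
 * configured over generic netlink (ILA_GENL_NAME) and applied from a
 * netfilter PRE_ROUTING hook as well as the ila_xlat_incoming() and
 * ila_xlat_outgoing() entry points.
 */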
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/ila.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"
struct ila_xlat_params {
        struct ila_params ip;
        __be64 identifier;
        int ifindex;
        unsigned int dir;
};

struct ila_map {
        struct ila_xlat_params p;
        struct rhash_head node;
        struct ila_map __rcu *next;
        struct rcu_head rcu;
};

static unsigned int ila_net_id;

struct ila_net {
        struct rhashtable rhash_table;
        spinlock_t *locks; /* Bucket locks for entry manipulation */
        unsigned int locks_mask;
        bool hooks_registered;
};

#define LOCKS_PER_CPU 10
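/* Allocate the per-namespace array of bucket spinlocks. The array is sized
 * from the number of possible CPUs (capped at 32) times LOCKS_PER_CPU and
 * rounded up to a power of two so a lock can be selected with a simple mask.
 */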
static int alloc_ila_locks(struct ila_net *ilan)
{
        unsigned int i, size;
        unsigned int nr_pcpus = num_possible_cpus();

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE)
                        ilan->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                ilan->locks = kmalloc_array(size, sizeof(spinlock_t),
                                            GFP_KERNEL);
                if (!ilan->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&ilan->locks[i]);
        }
        ilan->locks_mask = size - 1;

        return 0;
}
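/* hashrnd is a random seed (initialized once via net_get_random_once()).
 * ila_identifier_hash() folds the 64-bit identifier through jhash_2words(),
 * and ila_get_lock() uses the result to pick the bucket spinlock that
 * serializes modifications for that identifier.
 */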
static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
        net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_identifier_hash(__be64 identifier)
{
        u32 *v = (u32 *)&identifier;

        return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan, __be64 identifier)
{
        return &ilan->locks[ila_identifier_hash(identifier) & ilan->locks_mask];
}
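/* A mapping matches a packet unless a non-zero locator_match or ifindex in
 * the mapping disagrees with the packet, or the mapping's direction mask
 * excludes the lookup direction; zero-valued fields act as wildcards.
 * Returns nonzero on mismatch.
 */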
static inline int ila_cmp_wildcards(struct ila_map *ila, __be64 loc,
                                    int ifindex, unsigned int dir)
{
        return (ila->p.ip.locator_match && ila->p.ip.locator_match != loc) ||
               (ila->p.ifindex && ila->p.ifindex != ifindex) ||
               !(ila->p.dir & dir);
}

static inline int ila_cmp_params(struct ila_map *ila, struct ila_xlat_params *p)
{
        return (ila->p.ip.locator_match != p->ip.locator_match) ||
               (ila->p.ifindex != p->ifindex) ||
               (ila->p.dir != p->dir);
}
static int ila_cmpfn(struct rhashtable_compare_arg *arg,
                     const void *obj)
{
        const struct ila_map *ila = obj;

        return (ila->p.identifier != *(__be64 *)arg->key);
}
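/* Specificity score for a mapping: a set locator_match contributes 1 and a
 * set ifindex contributes 2. Entries sharing an identifier are kept in
 * descending score order so the most specific mapping is found first.
 */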
static inline int ila_order(struct ila_map *ila)
{
        int score = 0;

        if (ila->p.ip.locator_match)
                score += 1 << 0;

        if (ila->p.ifindex)
                score += 1 << 1;

        return score;
}
static const struct rhashtable_params rht_params = {
        .nelem_hint = 1024,
        .head_offset = offsetof(struct ila_map, node),
        .key_offset = offsetof(struct ila_map, p.identifier),
        .key_len = sizeof(u64), /* identifier */
        .max_size = 1048576,
        .min_size = 256,
        .automatic_shrinking = true,
        .obj_cmpfn = ila_cmpfn,
};

static struct genl_family ila_nl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = ILA_GENL_NAME,
        .version = ILA_GENL_VERSION,
        .maxattr = ILA_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
        [ILA_ATTR_IDENTIFIER] = { .type = NLA_U64, },
        [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
        [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
        [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
        [ILA_ATTR_DIR] = { .type = NLA_U32, },
};
static int parse_nl_config(struct genl_info *info,
                           struct ila_xlat_params *p)
{
        memset(p, 0, sizeof(*p));

        if (info->attrs[ILA_ATTR_IDENTIFIER])
                p->identifier = (__force __be64)nla_get_u64(
                        info->attrs[ILA_ATTR_IDENTIFIER]);

        if (info->attrs[ILA_ATTR_LOCATOR])
                p->ip.locator = (__force __be64)nla_get_u64(
                        info->attrs[ILA_ATTR_LOCATOR]);

        if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
                p->ip.locator_match = (__force __be64)nla_get_u64(
                        info->attrs[ILA_ATTR_LOCATOR_MATCH]);

        if (info->attrs[ILA_ATTR_IFINDEX])
                p->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

        if (info->attrs[ILA_ATTR_DIR])
                p->dir = nla_get_u32(info->attrs[ILA_ATTR_DIR]);

        return 0;
}
/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc,
                                                   int ifindex,
                                                   unsigned int dir,
                                                   struct ila_net *ilan)
{
        struct ila_map *ila;

        ila = rhashtable_lookup_fast(&ilan->rhash_table, &id, rht_params);
        while (ila) {
                if (!ila_cmp_wildcards(ila, loc, ifindex, dir))
                        return ila;
                ila = rcu_access_pointer(ila->next);
        }

        return NULL;
}
/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *p,
                                                   struct ila_net *ilan)
{
        struct ila_map *ila;

        ila = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
                                     rht_params);
        while (ila) {
                if (!ila_cmp_params(ila, p))
                        return ila;
                ila = rcu_access_pointer(ila->next);
        }

        return NULL;
}
static inline void ila_release(struct ila_map *ila)
{
        kfree_rcu(ila, rcu);
}

static void ila_free_cb(void *ptr, void *arg)
{
        struct ila_map *ila = (struct ila_map *)ptr, *next;

        /* Assume rcu_readlock held */
        while (ila) {
                next = rcu_access_pointer(ila->next);
                ila_release(ila);
                ila = next;
        }
}
static int ila_xlat_addr(struct sk_buff *skb, int dir);
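/* Netfilter PRE_ROUTING hook: translate the destination of packets arriving
 * in this namespace (ILA_DIR_IN). The hook is registered lazily, when the
 * first mapping is added (see ila_add_mapping()).
 */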
static unsigned int
ila_nf_input(void *priv,
             struct sk_buff *skb,
             const struct nf_hook_state *state)
{
        ila_xlat_addr(skb, ILA_DIR_IN);
        return NF_ACCEPT;
}

static struct nf_hook_ops ila_nf_hook_ops[] __read_mostly = {
        {
                .hook = ila_nf_input,
                .pf = NFPROTO_IPV6,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = -1,
        },
};
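/* Add a mapping under the identifier's bucket lock. Mappings that share an
 * identifier are chained through ->next in order of decreasing specificity
 * (ila_order()); adding an exact duplicate of the parameters fails with
 * -EEXIST. The checksum difference for the locator rewrite is precomputed
 * here when a locator_match is supplied.
 */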
static int ila_add_mapping(struct net *net, struct ila_xlat_params *p)
{
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct ila_map *ila, *head;
        spinlock_t *lock = ila_get_lock(ilan, p->identifier);
        int err = 0, order;

        if (!ilan->hooks_registered) {
                /* We defer registering net hooks in the namespace until the
                 * first mapping is added.
                 */
                err = nf_register_net_hooks(net, ila_nf_hook_ops,
                                            ARRAY_SIZE(ila_nf_hook_ops));
                if (err)
                        return err;

                ilan->hooks_registered = true;
        }

        ila = kzalloc(sizeof(*ila), GFP_KERNEL);
        if (!ila)
                return -ENOMEM;

        ila->p = *p;

        if (p->ip.locator_match) {
                /* Precompute checksum difference for translation since we
                 * know both the old identifier and the new one.
                 */
                ila->p.ip.csum_diff = compute_csum_diff8(
                        (__be32 *)&p->ip.locator_match,
                        (__be32 *)&p->ip.locator);
        }

        order = ila_order(ila);

        spin_lock(lock);

        head = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
                                      rht_params);
        if (!head) {
                /* New entry for the rhash_table */
                err = rhashtable_lookup_insert_fast(&ilan->rhash_table,
                                                    &ila->node, rht_params);
        } else {
                struct ila_map *tila = head, *prev = NULL;

                do {
                        if (!ila_cmp_params(tila, p)) {
                                err = -EEXIST;
                                goto out;
                        }

                        if (order > ila_order(tila))
                                break;

                        prev = tila;
                        tila = rcu_dereference_protected(tila->next,
                                lockdep_is_held(lock));
                } while (tila);

                if (prev) {
                        /* Insert in sub list of head */
                        RCU_INIT_POINTER(ila->next, tila);
                        rcu_assign_pointer(prev->next, ila);
                } else {
                        /* Make this ila new head */
                        RCU_INIT_POINTER(ila->next, head);
                        err = rhashtable_replace_fast(&ilan->rhash_table,
                                                      &head->node,
                                                      &ila->node, rht_params);
                        if (err)
                                goto out;
                }
        }

out:
        spin_unlock(lock);

        if (err)
                kfree(ila);

        return err;
}
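/* Delete the mapping that exactly matches the given parameters. If the entry
 * is the rhashtable head and other entries share the identifier, the next
 * entry is promoted into the table with rhashtable_replace_fast(); the
 * removed entry is freed after an RCU grace period via ila_release().
 */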
static int ila_del_mapping(struct net *net, struct ila_xlat_params *p)
{
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct ila_map *ila, *head, *prev;
        spinlock_t *lock = ila_get_lock(ilan, p->identifier);
        int err = -ENOENT;

        spin_lock(lock);

        head = rhashtable_lookup_fast(&ilan->rhash_table,
                                      &p->identifier, rht_params);
        ila = head;

        prev = NULL;

        while (ila) {
                if (ila_cmp_params(ila, p)) {
                        prev = ila;
                        ila = rcu_dereference_protected(ila->next,
                                lockdep_is_held(lock));
                        continue;
                }

                err = 0;

                if (prev) {
                        /* Not head, just delete from list */
                        rcu_assign_pointer(prev->next, ila->next);
                } else {
                        /* It is the head. If there is something in the
                         * sublist we need to make a new head.
                         */
                        head = rcu_dereference_protected(ila->next,
                                lockdep_is_held(lock));
                        if (head) {
                                /* Put first entry in the sublist into the
                                 * table
                                 */
                                err = rhashtable_replace_fast(
                                        &ilan->rhash_table, &ila->node,
                                        &head->node, rht_params);
                                if (err)
                                        goto out;
                        } else {
                                /* Entry no longer used */
                                err = rhashtable_remove_fast(&ilan->rhash_table,
                                                             &ila->node,
                                                             rht_params);
                        }
                }

                ila_release(ila);

                break;
        }

out:
        spin_unlock(lock);

        return err;
}
static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct ila_xlat_params p;
        int err;

        err = parse_nl_config(info, &p);
        if (err)
                return err;

        return ila_add_mapping(net, &p);
}

static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct ila_xlat_params p;
        int err;

        err = parse_nl_config(info, &p);
        if (err)
                return err;

        ila_del_mapping(net, &p);

        return 0;
}
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
        if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER,
                        (__force u64)ila->p.identifier) ||
            nla_put_u64(msg, ILA_ATTR_LOCATOR,
                        (__force u64)ila->p.ip.locator) ||
            nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH,
                        (__force u64)ila->p.ip.locator_match) ||
            nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) ||
            nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir))
                return -1;

        return 0;
}
static int ila_dump_info(struct ila_map *ila,
                         u32 portid, u32 seq, u32 flags,
                         struct sk_buff *skb, u8 cmd)
{
        void *hdr;

        hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
        if (!hdr)
                return -ENOMEM;

        if (ila_fill_info(ila, skb) < 0)
                goto nla_put_failure;

        genlmsg_end(skb, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct sk_buff *msg;
        struct ila_xlat_params p;
        struct ila_map *ila;
        int ret;

        ret = parse_nl_config(info, &p);
        if (ret)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        rcu_read_lock();

        ila = ila_lookup_by_params(&p, ilan);
        if (ila) {
                ret = ila_dump_info(ila,
                                    info->snd_portid,
                                    info->snd_seq, 0, msg,
                                    info->genlhdr->cmd);
        }

        rcu_read_unlock();

        if (ret < 0)
                goto out_free;

        return genlmsg_reply(msg, info);

out_free:
        nlmsg_free(msg);
        return ret;
}
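/* Netlink dump state: the rhashtable walker lives in cb->args for the
 * duration of the dump.
 */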
struct ila_dump_iter {
        struct rhashtable_iter rhiter;
};

static int ila_nl_dump_start(struct netlink_callback *cb)
{
        struct net *net = sock_net(cb->skb->sk);
        struct ila_net *ilan = net_generic(net, ila_net_id);
        struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args;

        return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter);
}

static int ila_nl_dump_done(struct netlink_callback *cb)
{
        struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args;

        rhashtable_walk_exit(&iter->rhiter);

        return 0;
}
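/* Walk the whole table, emitting one ILA_CMD_GET message per mapping and
 * following the ->next chain behind each rhashtable entry. A concurrent
 * resize (-EAGAIN from the walker) is retried.
 */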
static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args;
        struct rhashtable_iter *rhiter = &iter->rhiter;
        struct ila_map *ila;
        int ret;

        ret = rhashtable_walk_start(rhiter);
        if (ret && ret != -EAGAIN)
                goto done;

        for (;;) {
                ila = rhashtable_walk_next(rhiter);

                if (IS_ERR(ila)) {
                        if (PTR_ERR(ila) == -EAGAIN)
                                continue;
                        ret = PTR_ERR(ila);
                        goto done;
                } else if (!ila) {
                        break;
                }

                while (ila) {
                        ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                            skb, ILA_CMD_GET);
                        if (ret)
                                goto done;

                        ila = rcu_access_pointer(ila->next);
                }
        }

        ret = skb->len;

done:
        rhashtable_walk_stop(rhiter);
        return ret;
}
static const struct genl_ops ila_nl_ops[] = {
        {
                .cmd = ILA_CMD_ADD,
                .doit = ila_nl_cmd_add_mapping,
                .policy = ila_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = ILA_CMD_DEL,
                .doit = ila_nl_cmd_del_mapping,
                .policy = ila_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = ILA_CMD_GET,
                .doit = ila_nl_cmd_get_mapping,
                .start = ila_nl_dump_start,
                .dumpit = ila_nl_dump,
                .done = ila_nl_dump_done,
                .policy = ila_nl_policy,
        },
};

#define ILA_HASH_TABLE_SIZE 1024
static __net_init int ila_init_net(struct net *net)
{
        int err;
        struct ila_net *ilan = net_generic(net, ila_net_id);

        err = alloc_ila_locks(ilan);
        if (err)
                return err;

        rhashtable_init(&ilan->rhash_table, &rht_params);

        return 0;
}

static __net_exit void ila_exit_net(struct net *net)
{
        struct ila_net *ilan = net_generic(net, ila_net_id);

        rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL);

        kvfree(ilan->locks);

        if (ilan->hooks_registered)
                nf_unregister_net_hooks(net, ila_nf_hook_ops,
                                        ARRAY_SIZE(ila_nf_hook_ops));
}
static struct pernet_operations ila_net_ops = {
        .init = ila_init_net,
        .exit = ila_exit_net,
        .id = &ila_net_id,
        .size = sizeof(struct ila_net),
};
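/* Translate the destination address of skb for the given direction. The low
 * order 64 bits of the IPv6 destination are the identifier used as the lookup
 * key; the high order 64 bits are the locator checked against locator_match.
 * On a match, update_ipv6_locator() rewrites the locator from the mapping
 * parameters (including the precomputed checksum difference).
 */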
static int ila_xlat_addr(struct sk_buff *skb, int dir)
{
        struct ila_map *ila;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct net *net = dev_net(skb->dev);
        struct ila_net *ilan = net_generic(net, ila_net_id);
        __be64 identifier, locator_match;
        size_t nhoff;

        /* Assumes skb contains a valid IPv6 header that is pulled */

        identifier = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[8];
        locator_match = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[0];
        nhoff = sizeof(struct ipv6hdr);

        rcu_read_lock();

        ila = ila_lookup_wildcards(identifier, locator_match,
                                   skb->dev->ifindex, dir, ilan);
        if (ila)
                update_ipv6_locator(skb, &ila->p.ip);

        rcu_read_unlock();

        return 0;
}
int ila_xlat_incoming(struct sk_buff *skb)
{
        return ila_xlat_addr(skb, ILA_DIR_IN);
}
EXPORT_SYMBOL(ila_xlat_incoming);

int ila_xlat_outgoing(struct sk_buff *skb)
{
        return ila_xlat_addr(skb, ILA_DIR_OUT);
}
EXPORT_SYMBOL(ila_xlat_outgoing);
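/* Register the per-namespace state and the ILA generic netlink family; torn
 * down in reverse order by ila_xlat_fini() or on registration failure.
 */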
int ila_xlat_init(void)
{
        int ret;

        ret = register_pernet_device(&ila_net_ops);
        if (ret)
                goto exit;

        ret = genl_register_family_with_ops(&ila_nl_family,
                                            ila_nl_ops);
        if (ret < 0)
                goto unregister;

        return 0;

unregister:
        unregister_pernet_device(&ila_net_ops);
exit:
        return ret;
}

void ila_xlat_fini(void)
{
        genl_unregister_family(&ila_nl_family);
        unregister_pernet_device(&ila_net_ops);
}