// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */
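
/*
 * act_tunnel_key: TC action that attaches (SET) or drops (RELEASE) IP tunnel
 * metadata on skbs: key id, outer addresses, TOS/TTL, destination port,
 * checksum flag and per-tunnel options (geneve, vxlan, erspan).
 *
 * Illustrative userspace usage (an example, not taken from this file):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *                             id 11 dst_port 4789 \
 *       action mirred egress redirect dev vxlan0
 */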

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>

static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;
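
/* Datapath handler, called per packet under RCU (BH).  RELEASE drops any
 * existing dst on the skb; SET attaches a clone of the preconfigured tunnel
 * metadata dst so a subsequent redirect to a tunnel device can use it.
 */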
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_tunnel_key *t = to_tunnel_key(a);
        struct tcf_tunnel_key_params *params;
        int action;

        params = rcu_dereference_bh(t->params);

        tcf_lastuse_update(&t->tcf_tm);
        tcf_action_update_bstats(&t->common, skb);
        action = READ_ONCE(t->tcf_action);

        switch (params->tcft_action) {
        case TCA_TUNNEL_KEY_ACT_RELEASE:
                skb_dst_drop(skb);
                break;
        case TCA_TUNNEL_KEY_ACT_SET:
                skb_dst_drop(skb);
                skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
                break;
        default:
                WARN_ONCE(1, "Bad tunnel_key action %d.\n",
                          params->tcft_action);
                break;
        }

        return action;
}

static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
        [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
                .strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
        [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
        [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
        [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
        [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
        [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
        [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
                                                 .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
        [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
        [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
        [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
        [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
        [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};
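
/* Copy one GENEVE option TLV from netlink into @dst (when @dst is non-NULL)
 * and return its length in bytes, or a negative errno.  Option data must be
 * a non-zero multiple of 4 bytes.
 */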
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
                           struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
        int err, data_len, opt_len;
        u8 *data;

        err = nla_parse_nested_deprecated(tb,
                                          TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
                                          nla, geneve_opt_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
            !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
            !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
                return -EINVAL;
        }

        data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
        data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
        if (data_len < 4) {
                NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
                return -ERANGE;
        }
        if (data_len % 4) {
                NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
                return -ERANGE;
        }

        opt_len = sizeof(struct geneve_opt) + data_len;
        if (dst) {
                struct geneve_opt *opt = dst;

                WARN_ON(dst_len < opt_len);

                opt->opt_class =
                        nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
                opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
                opt->length = data_len / 4; /* length is in units of 4 bytes */
                opt->r1 = 0;
                opt->r2 = 0;
                opt->r3 = 0;

                memcpy(opt + 1, data, data_len);
        }

        return opt_len;
}
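
/* Copy the VXLAN GBP option into @dst (when non-NULL); returns the size of
 * struct vxlan_metadata or a negative errno.
 */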
static int
tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
        int err;

        err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
                               vxlan_opt_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
                return -EINVAL;
        }

        if (dst) {
                struct vxlan_metadata *md = dst;

                md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
                md->gbp &= VXLAN_GBP_MASK;
        }

        return sizeof(struct vxlan_metadata);
}
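
/* Copy ERSPAN metadata into @dst (when non-NULL).  Version 1 requires an
 * index; version 2 requires dir and hwid.  Returns the size of
 * struct erspan_metadata or a negative errno.
 */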
static int
tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
                           struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
        int err;
        u8 ver;

        err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
                               erspan_opt_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
                return -EINVAL;
        }

        ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
        if (ver == 1) {
                if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
                        NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
                        return -EINVAL;
                }
        } else if (ver == 2) {
                if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
                    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
                        NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
                        return -EINVAL;
                }
        } else {
                NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
                return -EINVAL;
        }

        if (dst) {
                struct erspan_metadata *md = dst;

                md->version = ver;
                if (ver == 1) {
                        nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
                        md->u.index = nla_get_be32(nla);
                } else {
                        nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
                        md->u.md2.dir = nla_get_u8(nla);
                        nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
                        set_hwid(&md->u.md2, nla_get_u8(nla));
                }
        }

        return sizeof(struct erspan_metadata);
}
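
/* Walk the TCA_TUNNEL_KEY_ENC_OPTS nest and copy all options (which must all
 * be of one tunnel type) into @dst.  Called with dst == NULL to size the
 * buffer, then again to fill it; returns the total options length in bytes.
 */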
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
                                int dst_len, struct netlink_ext_ack *extack)
{
        int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
        const struct nlattr *attr, *head = nla_data(nla);

        err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
                                      enc_opts_policy, extack);
        if (err)
                return err;

        nla_for_each_attr(attr, head, len, rem) {
                switch (nla_type(attr)) {
                case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
                        if (type && type != TUNNEL_GENEVE_OPT) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
                                return -EINVAL;
                        }
                        opt_len = tunnel_key_copy_geneve_opt(attr, dst,
                                                             dst_len, extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        if (opts_len > IP_TUNNEL_OPTS_MAX) {
                                NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
                                return -EINVAL;
                        }
                        if (dst) {
                                dst_len -= opt_len;
                                dst += opt_len;
                        }
                        type = TUNNEL_GENEVE_OPT;
                        break;
                case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
                        if (type) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
                                return -EINVAL;
                        }
                        opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
                                                            dst_len, extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        type = TUNNEL_VXLAN_OPT;
                        break;
                case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
                        if (type) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
                                return -EINVAL;
                        }
                        opt_len = tunnel_key_copy_erspan_opt(attr, dst,
                                                             dst_len, extack);
                        if (opt_len < 0)
                                return opt_len;
                        opts_len += opt_len;
                        type = TUNNEL_ERSPAN_OPT;
                        break;
                }
        }

        if (!opts_len) {
                NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
                return -EINVAL;
        }

        if (rem > 0) {
                NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
                return -EINVAL;
        }

        return opts_len;
}
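
/* Dry run of tunnel_key_copy_opts() to learn how much option space to
 * reserve in the metadata dst.
 */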
static int tunnel_key_get_opts_len(struct nlattr *nla,
                                   struct netlink_ext_ack *extack)
{
        return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
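
/* Copy the options into the tunnel info and set the matching TUNNEL_*_OPT
 * flag for the option type found in the netlink nest.
 */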
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
                               int opts_len, struct netlink_ext_ack *extack)
{
        info->options_len = opts_len;
        switch (nla_type(nla_data(nla))) {
        case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
                info->key.tun_flags |= TUNNEL_GENEVE_OPT;
                return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
                                            opts_len, extack);
#else
                return -EAFNOSUPPORT;
#endif
        case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
                info->key.tun_flags |= TUNNEL_VXLAN_OPT;
                return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
                                            opts_len, extack);
#else
                return -EAFNOSUPPORT;
#endif
        case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
                info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
                return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
                                            opts_len, extack);
#else
                return -EAFNOSUPPORT;
#endif
        default:
                NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
                return -EINVAL;
        }
}

static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
        [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
        [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
        [TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
        [TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
        [TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
        [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
        [TCA_TUNNEL_KEY_ENC_DST_PORT] = { .type = NLA_U16 },
        [TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
        [TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
        [TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
        [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};

static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
        if (!p)
                return;
        if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
                dst_release(&p->tcft_enc_metadata->dst);

        kfree_rcu(p, rcu);
}
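
/* Control path: parse the netlink attributes, build the metadata dst for the
 * SET action (addresses, key id, tos/ttl, dst port, csum flag and encap
 * options) and swap in the new parameter block under tcf_lock, freeing the
 * old one via RCU.
 */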
static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
                           struct tcf_proto *tp, u32 act_flags,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
        struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
        struct tcf_tunnel_key_params *params_new;
        struct metadata_dst *metadata = NULL;
        struct tcf_chain *goto_ch = NULL;
        struct tc_tunnel_key *parm;
        struct tcf_tunnel_key *t;
        bool exists = false;
        __be16 dst_port = 0;
        __be64 key_id = 0;
        int opts_len = 0;
        __be16 flags = 0;
        u8 tos, ttl;
        int ret = 0;
        u32 index;
        int err;

        if (!nla) {
                NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
                                          tunnel_key_policy, extack);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
                return err;
        }

        if (!tb[TCA_TUNNEL_KEY_PARMS]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
                return -EINVAL;
        }

        parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        switch (parm->t_action) {
        case TCA_TUNNEL_KEY_ACT_RELEASE:
                break;
        case TCA_TUNNEL_KEY_ACT_SET:
                if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
                        __be32 key32;

                        key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
                        key_id = key32_to_tunnel_id(key32);
                        flags = TUNNEL_KEY;
                }

                flags |= TUNNEL_CSUM;
                if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
                    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
                        flags &= ~TUNNEL_CSUM;

                if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
                        dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

                if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
                        opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
                                                           extack);
                        if (opts_len < 0) {
                                ret = opts_len;
                                goto err_out;
                        }
                }

                tos = 0;
                if (tb[TCA_TUNNEL_KEY_ENC_TOS])
                        tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
                ttl = 0;
                if (tb[TCA_TUNNEL_KEY_ENC_TTL])
                        ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

                if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
                    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
                        __be32 saddr;
                        __be32 daddr;

                        saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
                        daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

                        metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
                                                    dst_port, flags,
                                                    key_id, opts_len);
                } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
                           tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
                        struct in6_addr saddr;
                        struct in6_addr daddr;

                        saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
                        daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

                        metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
                                                      0, flags,
                                                      key_id, opts_len);
                } else {
                        NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
                        ret = -EINVAL;
                        goto err_out;
                }

                if (!metadata) {
                        NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
                        ret = -ENOMEM;
                        goto err_out;
                }

#ifdef CONFIG_DST_CACHE
                ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
                if (ret)
                        goto release_tun_meta;
#endif

                if (opts_len) {
                        ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
                                                  &metadata->u.tun_info,
                                                  opts_len, extack);
                        if (ret < 0)
                                goto release_tun_meta;
                }

                metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
                break;
        default:
                NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
                ret = -EINVAL;
                goto err_out;
        }

        if (!exists) {
                ret = tcf_idr_create_from_flags(tn, index, est, a,
                                                &act_tunnel_key_ops, bind,
                                                act_flags);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
                        goto release_tun_meta;
                }

                ret = ACT_P_CREATED;
        } else if (!ovr) {
                NL_SET_ERR_MSG(extack, "TC IDR already exists");
                ret = -EEXIST;
                goto release_tun_meta;
        }

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0) {
                ret = err;
                exists = true;
                goto release_tun_meta;
        }
        t = to_tunnel_key(*a);

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
                ret = -ENOMEM;
                exists = true;
                goto put_chain;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;

        spin_lock_bh(&t->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        params_new = rcu_replace_pointer(t->params, params_new,
                                         lockdep_is_held(&t->tcf_lock));
        spin_unlock_bh(&t->tcf_lock);
        tunnel_key_release_params(params_new);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;

put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

release_tun_meta:
        if (metadata)
                dst_release(&metadata->dst);

err_out:
        if (exists)
                tcf_idr_release(*a, bind);
        else
                tcf_idr_cleanup(tn, index);
        return ret;
}
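
/* ->cleanup(): release the current parameter block and its metadata dst. */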
static void tunnel_key_release(struct tc_action *a)
{
        struct tcf_tunnel_key *t = to_tunnel_key(a);
        struct tcf_tunnel_key_params *params;

        params = rcu_dereference_protected(t->params, 1);
        tunnel_key_release_params(params);
}
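
/* Dump helpers: re-encode the stored tunnel option metadata into the same
 * nested netlink attributes that were used at configuration time.
 */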
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
                                       const struct ip_tunnel_info *info)
{
        int len = info->options_len;
        u8 *src = (u8 *)(info + 1);
        struct nlattr *start;

        start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
        if (!start)
                return -EMSGSIZE;

        while (len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
                                 opt->opt_class) ||
                    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
                               opt->type) ||
                    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
                            opt->length * 4, opt + 1)) {
                        nla_nest_cancel(skb, start);
                        return -EMSGSIZE;
                }

                len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        nla_nest_end(skb, start);
        return 0;
}

static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
                                      const struct ip_tunnel_info *info)
{
        struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
        struct nlattr *start;

        start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
        if (!start)
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
                nla_nest_cancel(skb, start);
                return -EMSGSIZE;
        }

        nla_nest_end(skb, start);
        return 0;
}

static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
                                       const struct ip_tunnel_info *info)
{
        struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
        struct nlattr *start;

        start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
        if (!start)
                return -EMSGSIZE;

        if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
                goto err;

        if (md->version == 1 &&
            nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
                goto err;

        if (md->version == 2 &&
            (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
                        md->u.md2.dir) ||
             nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
                        get_hwid(&md->u.md2))))
                goto err;

        nla_nest_end(skb, start);
        return 0;
err:
        nla_nest_cancel(skb, start);
        return -EMSGSIZE;
}

static int tunnel_key_opts_dump(struct sk_buff *skb,
                                const struct ip_tunnel_info *info)
{
        struct nlattr *start;
        int err = -EINVAL;

        if (!info->options_len)
                return 0;

        start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
        if (!start)
                return -EMSGSIZE;

        if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
                err = tunnel_key_geneve_opts_dump(skb, info);
                if (err)
                        goto err_out;
        } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
                err = tunnel_key_vxlan_opts_dump(skb, info);
                if (err)
                        goto err_out;
        } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
                err = tunnel_key_erspan_opts_dump(skb, info);
                if (err)
                        goto err_out;
        } else {
err_out:
                nla_nest_cancel(skb, start);
                return err;
        }

        nla_nest_end(skb, start);
        return 0;
}

static int tunnel_key_dump_addresses(struct sk_buff *skb,
                                     const struct ip_tunnel_info *info)
{
        unsigned short family = ip_tunnel_info_af(info);

        if (family == AF_INET) {
                __be32 saddr = info->key.u.ipv4.src;
                __be32 daddr = info->key.u.ipv4.dst;

                if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
                    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
                        return 0;
        }

        if (family == AF_INET6) {
                const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
                const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;

                if (!nla_put_in6_addr(skb,
                                      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
                    !nla_put_in6_addr(skb,
                                      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
                        return 0;
        }

        return -EINVAL;
}
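
/* ->dump(): serialize the action parameters and timestamps to userspace,
 * under tcf_lock so the result is consistent with a concurrent init.
 */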
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                           int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_tunnel_key *t = to_tunnel_key(a);
        struct tcf_tunnel_key_params *params;
        struct tc_tunnel_key opt = {
                .index = t->tcf_index,
                .refcnt = refcount_read(&t->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
        };
        struct tcf_t tm;

        spin_lock_bh(&t->tcf_lock);
        params = rcu_dereference_protected(t->params,
                                           lockdep_is_held(&t->tcf_lock));
        opt.action = t->tcf_action;
        opt.t_action = params->tcft_action;

        if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
                struct ip_tunnel_info *info =
                        &params->tcft_enc_metadata->u.tun_info;
                struct ip_tunnel_key *key = &info->key;
                __be32 key_id = tunnel_id_to_key32(key->tun_id);

                if (((key->tun_flags & TUNNEL_KEY) &&
                     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
                    tunnel_key_dump_addresses(skb,
                                              &params->tcft_enc_metadata->u.tun_info) ||
                    (key->tp_dst &&
                     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
                                  key->tp_dst)) ||
                    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
                               !(key->tun_flags & TUNNEL_CSUM)) ||
                    tunnel_key_opts_dump(skb, info))
                        goto nla_put_failure;

                if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
                        goto nla_put_failure;

                if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
                        goto nla_put_failure;
        }

        tcf_tm_dump(&tm, &t->tcf_tm);
        if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
                          &tm, TCA_TUNNEL_KEY_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&t->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&t->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
                             struct netlink_callback *cb, int type,
                             const struct tc_action_ops *ops,
                             struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

        return tcf_idr_search(tn, a, index);
}
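
/* tc action ops registered under the "tunnel_key" kind. */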
static struct tc_action_ops act_tunnel_key_ops = {
        .kind = "tunnel_key",
        .id = TCA_ID_TUNNEL_KEY,
        .owner = THIS_MODULE,
        .act = tunnel_key_act,
        .dump = tunnel_key_dump,
        .init = tunnel_key_init,
        .cleanup = tunnel_key_release,
        .walk = tunnel_key_walker,
        .lookup = tunnel_key_search,
        .size = sizeof(struct tcf_tunnel_key),
};

static __net_init int tunnel_key_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

        return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, tunnel_key_net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
        .init = tunnel_key_init_net,
        .exit_batch = tunnel_key_exit_net,
        .id = &tunnel_key_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
        return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
        tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");