// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
23 static unsigned int tunnel_key_net_id
;
24 static struct tc_action_ops act_tunnel_key_ops
;
26 static int tunnel_key_act(struct sk_buff
*skb
, const struct tc_action
*a
,
27 struct tcf_result
*res
)
29 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
30 struct tcf_tunnel_key_params
*params
;
33 params
= rcu_dereference_bh(t
->params
);
35 tcf_lastuse_update(&t
->tcf_tm
);
36 tcf_action_update_bstats(&t
->common
, skb
);
37 action
= READ_ONCE(t
->tcf_action
);
39 switch (params
->tcft_action
) {
40 case TCA_TUNNEL_KEY_ACT_RELEASE
:
43 case TCA_TUNNEL_KEY_ACT_SET
:
45 skb_dst_set(skb
, dst_clone(¶ms
->tcft_enc_metadata
->dst
));
48 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
56 static const struct nla_policy
57 enc_opts_policy
[TCA_TUNNEL_KEY_ENC_OPTS_MAX
+ 1] = {
58 [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC
] = {
59 .strict_start_type
= TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
},
60 [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
61 [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
] = { .type
= NLA_NESTED
},
62 [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
] = { .type
= NLA_NESTED
},
65 static const struct nla_policy
66 geneve_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
67 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
68 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
69 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
73 static const struct nla_policy
74 vxlan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1] = {
75 [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
] = { .type
= NLA_U32
},
78 static const struct nla_policy
79 erspan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1] = {
80 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
] = { .type
= NLA_U8
},
81 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
] = { .type
= NLA_U32
},
82 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] = { .type
= NLA_U8
},
83 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
] = { .type
= NLA_U8
},
87 tunnel_key_copy_geneve_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
88 struct netlink_ext_ack
*extack
)
90 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1];
91 int err
, data_len
, opt_len
;
94 err
= nla_parse_nested_deprecated(tb
,
95 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
,
96 nla
, geneve_opt_policy
, extack
);
100 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] ||
101 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] ||
102 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]) {
103 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
107 data
= nla_data(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
108 data_len
= nla_len(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
110 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
114 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
118 opt_len
= sizeof(struct geneve_opt
) + data_len
;
120 struct geneve_opt
*opt
= dst
;
122 WARN_ON(dst_len
< opt_len
);
125 nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
]);
126 opt
->type
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
]);
127 opt
->length
= data_len
/ 4; /* length is in units of 4 bytes */
132 memcpy(opt
+ 1, data
, data_len
);
139 tunnel_key_copy_vxlan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
140 struct netlink_ext_ack
*extack
)
142 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1];
145 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
, nla
,
146 vxlan_opt_policy
, extack
);
150 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]) {
151 NL_SET_ERR_MSG(extack
, "Missing tunnel key vxlan option gbp");
156 struct vxlan_metadata
*md
= dst
;
158 md
->gbp
= nla_get_u32(tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]);
159 md
->gbp
&= VXLAN_GBP_MASK
;
162 return sizeof(struct vxlan_metadata
);
166 tunnel_key_copy_erspan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
167 struct netlink_ext_ack
*extack
)
169 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1];
173 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
, nla
,
174 erspan_opt_policy
, extack
);
178 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]) {
179 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option ver");
183 ver
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]);
185 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
]) {
186 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option index");
189 } else if (ver
== 2) {
190 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] ||
191 !tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
]) {
192 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option dir or hwid");
196 NL_SET_ERR_MSG(extack
, "Tunnel key erspan option ver is incorrect");
201 struct erspan_metadata
*md
= dst
;
205 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
];
206 md
->u
.index
= nla_get_be32(nla
);
208 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
];
209 md
->u
.md2
.dir
= nla_get_u8(nla
);
210 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
];
211 set_hwid(&md
->u
.md2
, nla_get_u8(nla
));
215 return sizeof(struct erspan_metadata
);
218 static int tunnel_key_copy_opts(const struct nlattr
*nla
, u8
*dst
,
219 int dst_len
, struct netlink_ext_ack
*extack
)
221 int err
, rem
, opt_len
, len
= nla_len(nla
), opts_len
= 0, type
= 0;
222 const struct nlattr
*attr
, *head
= nla_data(nla
);
224 err
= nla_validate_deprecated(head
, len
, TCA_TUNNEL_KEY_ENC_OPTS_MAX
,
225 enc_opts_policy
, extack
);
229 nla_for_each_attr(attr
, head
, len
, rem
) {
230 switch (nla_type(attr
)) {
231 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
232 if (type
&& type
!= TUNNEL_GENEVE_OPT
) {
233 NL_SET_ERR_MSG(extack
, "Duplicate type for geneve options");
236 opt_len
= tunnel_key_copy_geneve_opt(attr
, dst
,
241 if (opts_len
> IP_TUNNEL_OPTS_MAX
) {
242 NL_SET_ERR_MSG(extack
, "Tunnel options exceeds max size");
249 type
= TUNNEL_GENEVE_OPT
;
251 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
253 NL_SET_ERR_MSG(extack
, "Duplicate type for vxlan options");
256 opt_len
= tunnel_key_copy_vxlan_opt(attr
, dst
,
261 type
= TUNNEL_VXLAN_OPT
;
263 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
265 NL_SET_ERR_MSG(extack
, "Duplicate type for erspan options");
268 opt_len
= tunnel_key_copy_erspan_opt(attr
, dst
,
273 type
= TUNNEL_ERSPAN_OPT
;
279 NL_SET_ERR_MSG(extack
, "Empty list of tunnel options");
284 NL_SET_ERR_MSG(extack
, "Trailing data after parsing tunnel key options attributes");
291 static int tunnel_key_get_opts_len(struct nlattr
*nla
,
292 struct netlink_ext_ack
*extack
)
294 return tunnel_key_copy_opts(nla
, NULL
, 0, extack
);
297 static int tunnel_key_opts_set(struct nlattr
*nla
, struct ip_tunnel_info
*info
,
298 int opts_len
, struct netlink_ext_ack
*extack
)
300 info
->options_len
= opts_len
;
301 switch (nla_type(nla_data(nla
))) {
302 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
303 #if IS_ENABLED(CONFIG_INET)
304 info
->key
.tun_flags
|= TUNNEL_GENEVE_OPT
;
305 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
308 return -EAFNOSUPPORT
;
310 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
311 #if IS_ENABLED(CONFIG_INET)
312 info
->key
.tun_flags
|= TUNNEL_VXLAN_OPT
;
313 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
316 return -EAFNOSUPPORT
;
318 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
319 #if IS_ENABLED(CONFIG_INET)
320 info
->key
.tun_flags
|= TUNNEL_ERSPAN_OPT
;
321 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
324 return -EAFNOSUPPORT
;
327 NL_SET_ERR_MSG(extack
, "Cannot set tunnel options for unknown tunnel type");
332 static const struct nla_policy tunnel_key_policy
[TCA_TUNNEL_KEY_MAX
+ 1] = {
333 [TCA_TUNNEL_KEY_PARMS
] = { .len
= sizeof(struct tc_tunnel_key
) },
334 [TCA_TUNNEL_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
335 [TCA_TUNNEL_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
336 [TCA_TUNNEL_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
337 [TCA_TUNNEL_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
338 [TCA_TUNNEL_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
339 [TCA_TUNNEL_KEY_ENC_DST_PORT
] = {.type
= NLA_U16
},
340 [TCA_TUNNEL_KEY_NO_CSUM
] = { .type
= NLA_U8
},
341 [TCA_TUNNEL_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
342 [TCA_TUNNEL_KEY_ENC_TOS
] = { .type
= NLA_U8
},
343 [TCA_TUNNEL_KEY_ENC_TTL
] = { .type
= NLA_U8
},
346 static void tunnel_key_release_params(struct tcf_tunnel_key_params
*p
)
350 if (p
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
)
351 dst_release(&p
->tcft_enc_metadata
->dst
);
356 static int tunnel_key_init(struct net
*net
, struct nlattr
*nla
,
357 struct nlattr
*est
, struct tc_action
**a
,
358 int ovr
, int bind
, bool rtnl_held
,
359 struct tcf_proto
*tp
, u32 act_flags
,
360 struct netlink_ext_ack
*extack
)
362 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
363 struct nlattr
*tb
[TCA_TUNNEL_KEY_MAX
+ 1];
364 struct tcf_tunnel_key_params
*params_new
;
365 struct metadata_dst
*metadata
= NULL
;
366 struct tcf_chain
*goto_ch
= NULL
;
367 struct tc_tunnel_key
*parm
;
368 struct tcf_tunnel_key
*t
;
380 NL_SET_ERR_MSG(extack
, "Tunnel requires attributes to be passed");
384 err
= nla_parse_nested_deprecated(tb
, TCA_TUNNEL_KEY_MAX
, nla
,
385 tunnel_key_policy
, extack
);
387 NL_SET_ERR_MSG(extack
, "Failed to parse nested tunnel key attributes");
391 if (!tb
[TCA_TUNNEL_KEY_PARMS
]) {
392 NL_SET_ERR_MSG(extack
, "Missing tunnel key parameters");
396 parm
= nla_data(tb
[TCA_TUNNEL_KEY_PARMS
]);
398 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
405 switch (parm
->t_action
) {
406 case TCA_TUNNEL_KEY_ACT_RELEASE
:
408 case TCA_TUNNEL_KEY_ACT_SET
:
409 if (tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]) {
412 key32
= nla_get_be32(tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]);
413 key_id
= key32_to_tunnel_id(key32
);
417 flags
|= TUNNEL_CSUM
;
418 if (tb
[TCA_TUNNEL_KEY_NO_CSUM
] &&
419 nla_get_u8(tb
[TCA_TUNNEL_KEY_NO_CSUM
]))
420 flags
&= ~TUNNEL_CSUM
;
422 if (tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
])
423 dst_port
= nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
]);
425 if (tb
[TCA_TUNNEL_KEY_ENC_OPTS
]) {
426 opts_len
= tunnel_key_get_opts_len(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
435 if (tb
[TCA_TUNNEL_KEY_ENC_TOS
])
436 tos
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TOS
]);
438 if (tb
[TCA_TUNNEL_KEY_ENC_TTL
])
439 ttl
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TTL
]);
441 if (tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
] &&
442 tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]) {
446 saddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
]);
447 daddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]);
449 metadata
= __ip_tun_set_dst(saddr
, daddr
, tos
, ttl
,
452 } else if (tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
] &&
453 tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]) {
454 struct in6_addr saddr
;
455 struct in6_addr daddr
;
457 saddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
]);
458 daddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]);
460 metadata
= __ipv6_tun_set_dst(&saddr
, &daddr
, tos
, ttl
, dst_port
,
464 NL_SET_ERR_MSG(extack
, "Missing either ipv4 or ipv6 src and dst");
470 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel metadata dst");
475 #ifdef CONFIG_DST_CACHE
476 ret
= dst_cache_init(&metadata
->u
.tun_info
.dst_cache
, GFP_KERNEL
);
478 goto release_tun_meta
;
482 ret
= tunnel_key_opts_set(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
483 &metadata
->u
.tun_info
,
486 goto release_tun_meta
;
489 metadata
->u
.tun_info
.mode
|= IP_TUNNEL_INFO_TX
;
492 NL_SET_ERR_MSG(extack
, "Unknown tunnel key action");
498 ret
= tcf_idr_create_from_flags(tn
, index
, est
, a
,
499 &act_tunnel_key_ops
, bind
,
502 NL_SET_ERR_MSG(extack
, "Cannot create TC IDR");
503 goto release_tun_meta
;
508 NL_SET_ERR_MSG(extack
, "TC IDR already exists");
510 goto release_tun_meta
;
513 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
517 goto release_tun_meta
;
519 t
= to_tunnel_key(*a
);
521 params_new
= kzalloc(sizeof(*params_new
), GFP_KERNEL
);
522 if (unlikely(!params_new
)) {
523 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel key parameters");
528 params_new
->tcft_action
= parm
->t_action
;
529 params_new
->tcft_enc_metadata
= metadata
;
531 spin_lock_bh(&t
->tcf_lock
);
532 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
533 params_new
= rcu_replace_pointer(t
->params
, params_new
,
534 lockdep_is_held(&t
->tcf_lock
));
535 spin_unlock_bh(&t
->tcf_lock
);
536 tunnel_key_release_params(params_new
);
538 tcf_chain_put_by_act(goto_ch
);
544 tcf_chain_put_by_act(goto_ch
);
548 dst_release(&metadata
->dst
);
552 tcf_idr_release(*a
, bind
);
554 tcf_idr_cleanup(tn
, index
);
558 static void tunnel_key_release(struct tc_action
*a
)
560 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
561 struct tcf_tunnel_key_params
*params
;
563 params
= rcu_dereference_protected(t
->params
, 1);
564 tunnel_key_release_params(params
);
567 static int tunnel_key_geneve_opts_dump(struct sk_buff
*skb
,
568 const struct ip_tunnel_info
*info
)
570 int len
= info
->options_len
;
571 u8
*src
= (u8
*)(info
+ 1);
572 struct nlattr
*start
;
574 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
);
579 struct geneve_opt
*opt
= (struct geneve_opt
*)src
;
581 if (nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
,
583 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
,
585 nla_put(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
,
586 opt
->length
* 4, opt
+ 1)) {
587 nla_nest_cancel(skb
, start
);
591 len
-= sizeof(struct geneve_opt
) + opt
->length
* 4;
592 src
+= sizeof(struct geneve_opt
) + opt
->length
* 4;
595 nla_nest_end(skb
, start
);
599 static int tunnel_key_vxlan_opts_dump(struct sk_buff
*skb
,
600 const struct ip_tunnel_info
*info
)
602 struct vxlan_metadata
*md
= (struct vxlan_metadata
*)(info
+ 1);
603 struct nlattr
*start
;
605 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
);
609 if (nla_put_u32(skb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
, md
->gbp
)) {
610 nla_nest_cancel(skb
, start
);
614 nla_nest_end(skb
, start
);
618 static int tunnel_key_erspan_opts_dump(struct sk_buff
*skb
,
619 const struct ip_tunnel_info
*info
)
621 struct erspan_metadata
*md
= (struct erspan_metadata
*)(info
+ 1);
622 struct nlattr
*start
;
624 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
);
628 if (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
, md
->version
))
631 if (md
->version
== 1 &&
632 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
, md
->u
.index
))
635 if (md
->version
== 2 &&
636 (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
,
638 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
,
639 get_hwid(&md
->u
.md2
))))
642 nla_nest_end(skb
, start
);
645 nla_nest_cancel(skb
, start
);
649 static int tunnel_key_opts_dump(struct sk_buff
*skb
,
650 const struct ip_tunnel_info
*info
)
652 struct nlattr
*start
;
655 if (!info
->options_len
)
658 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS
);
662 if (info
->key
.tun_flags
& TUNNEL_GENEVE_OPT
) {
663 err
= tunnel_key_geneve_opts_dump(skb
, info
);
666 } else if (info
->key
.tun_flags
& TUNNEL_VXLAN_OPT
) {
667 err
= tunnel_key_vxlan_opts_dump(skb
, info
);
670 } else if (info
->key
.tun_flags
& TUNNEL_ERSPAN_OPT
) {
671 err
= tunnel_key_erspan_opts_dump(skb
, info
);
676 nla_nest_cancel(skb
, start
);
680 nla_nest_end(skb
, start
);
684 static int tunnel_key_dump_addresses(struct sk_buff
*skb
,
685 const struct ip_tunnel_info
*info
)
687 unsigned short family
= ip_tunnel_info_af(info
);
689 if (family
== AF_INET
) {
690 __be32 saddr
= info
->key
.u
.ipv4
.src
;
691 __be32 daddr
= info
->key
.u
.ipv4
.dst
;
693 if (!nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_SRC
, saddr
) &&
694 !nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_DST
, daddr
))
698 if (family
== AF_INET6
) {
699 const struct in6_addr
*saddr6
= &info
->key
.u
.ipv6
.src
;
700 const struct in6_addr
*daddr6
= &info
->key
.u
.ipv6
.dst
;
702 if (!nla_put_in6_addr(skb
,
703 TCA_TUNNEL_KEY_ENC_IPV6_SRC
, saddr6
) &&
704 !nla_put_in6_addr(skb
,
705 TCA_TUNNEL_KEY_ENC_IPV6_DST
, daddr6
))
712 static int tunnel_key_dump(struct sk_buff
*skb
, struct tc_action
*a
,
715 unsigned char *b
= skb_tail_pointer(skb
);
716 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
717 struct tcf_tunnel_key_params
*params
;
718 struct tc_tunnel_key opt
= {
719 .index
= t
->tcf_index
,
720 .refcnt
= refcount_read(&t
->tcf_refcnt
) - ref
,
721 .bindcnt
= atomic_read(&t
->tcf_bindcnt
) - bind
,
725 spin_lock_bh(&t
->tcf_lock
);
726 params
= rcu_dereference_protected(t
->params
,
727 lockdep_is_held(&t
->tcf_lock
));
728 opt
.action
= t
->tcf_action
;
729 opt
.t_action
= params
->tcft_action
;
731 if (nla_put(skb
, TCA_TUNNEL_KEY_PARMS
, sizeof(opt
), &opt
))
732 goto nla_put_failure
;
734 if (params
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
) {
735 struct ip_tunnel_info
*info
=
736 ¶ms
->tcft_enc_metadata
->u
.tun_info
;
737 struct ip_tunnel_key
*key
= &info
->key
;
738 __be32 key_id
= tunnel_id_to_key32(key
->tun_id
);
740 if (((key
->tun_flags
& TUNNEL_KEY
) &&
741 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_KEY_ID
, key_id
)) ||
742 tunnel_key_dump_addresses(skb
,
743 ¶ms
->tcft_enc_metadata
->u
.tun_info
) ||
745 nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_DST_PORT
,
747 nla_put_u8(skb
, TCA_TUNNEL_KEY_NO_CSUM
,
748 !(key
->tun_flags
& TUNNEL_CSUM
)) ||
749 tunnel_key_opts_dump(skb
, info
))
750 goto nla_put_failure
;
752 if (key
->tos
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TOS
, key
->tos
))
753 goto nla_put_failure
;
755 if (key
->ttl
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TTL
, key
->ttl
))
756 goto nla_put_failure
;
759 tcf_tm_dump(&tm
, &t
->tcf_tm
);
760 if (nla_put_64bit(skb
, TCA_TUNNEL_KEY_TM
, sizeof(tm
),
761 &tm
, TCA_TUNNEL_KEY_PAD
))
762 goto nla_put_failure
;
763 spin_unlock_bh(&t
->tcf_lock
);
768 spin_unlock_bh(&t
->tcf_lock
);
773 static int tunnel_key_walker(struct net
*net
, struct sk_buff
*skb
,
774 struct netlink_callback
*cb
, int type
,
775 const struct tc_action_ops
*ops
,
776 struct netlink_ext_ack
*extack
)
778 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
780 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
, extack
);
783 static int tunnel_key_search(struct net
*net
, struct tc_action
**a
, u32 index
)
785 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
787 return tcf_idr_search(tn
, a
, index
);
790 static struct tc_action_ops act_tunnel_key_ops
= {
791 .kind
= "tunnel_key",
792 .id
= TCA_ID_TUNNEL_KEY
,
793 .owner
= THIS_MODULE
,
794 .act
= tunnel_key_act
,
795 .dump
= tunnel_key_dump
,
796 .init
= tunnel_key_init
,
797 .cleanup
= tunnel_key_release
,
798 .walk
= tunnel_key_walker
,
799 .lookup
= tunnel_key_search
,
800 .size
= sizeof(struct tcf_tunnel_key
),
803 static __net_init
int tunnel_key_init_net(struct net
*net
)
805 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
807 return tc_action_net_init(net
, tn
, &act_tunnel_key_ops
);
810 static void __net_exit
tunnel_key_exit_net(struct list_head
*net_list
)
812 tc_action_net_exit(net_list
, tunnel_key_net_id
);
815 static struct pernet_operations tunnel_key_net_ops
= {
816 .init
= tunnel_key_init_net
,
817 .exit_batch
= tunnel_key_exit_net
,
818 .id
= &tunnel_key_net_id
,
819 .size
= sizeof(struct tc_action_net
),
822 static int __init
tunnel_key_init_module(void)
824 return tcf_register_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
827 static void __exit
tunnel_key_cleanup_module(void)
829 tcf_unregister_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
832 module_init(tunnel_key_init_module
);
833 module_exit(tunnel_key_cleanup_module
);
835 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
836 MODULE_DESCRIPTION("ip tunnel manipulation actions");
837 MODULE_LICENSE("GPL v2");