// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
24 static struct tc_action_ops act_tunnel_key_ops
;
26 TC_INDIRECT_SCOPE
int tunnel_key_act(struct sk_buff
*skb
,
27 const struct tc_action
*a
,
28 struct tcf_result
*res
)
30 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
31 struct tcf_tunnel_key_params
*params
;
34 params
= rcu_dereference_bh(t
->params
);
36 tcf_lastuse_update(&t
->tcf_tm
);
37 tcf_action_update_bstats(&t
->common
, skb
);
38 action
= READ_ONCE(t
->tcf_action
);
40 switch (params
->tcft_action
) {
41 case TCA_TUNNEL_KEY_ACT_RELEASE
:
44 case TCA_TUNNEL_KEY_ACT_SET
:
46 skb_dst_set(skb
, dst_clone(¶ms
->tcft_enc_metadata
->dst
));
49 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
57 static const struct nla_policy
58 enc_opts_policy
[TCA_TUNNEL_KEY_ENC_OPTS_MAX
+ 1] = {
59 [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC
] = {
60 .strict_start_type
= TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
},
61 [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
62 [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
] = { .type
= NLA_NESTED
},
63 [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
] = { .type
= NLA_NESTED
},
66 static const struct nla_policy
67 geneve_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
68 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
69 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
70 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
74 static const struct nla_policy
75 vxlan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1] = {
76 [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
] = { .type
= NLA_U32
},
79 static const struct nla_policy
80 erspan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1] = {
81 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
] = { .type
= NLA_U8
},
82 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
] = { .type
= NLA_U32
},
83 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] = { .type
= NLA_U8
},
84 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
] = { .type
= NLA_U8
},
88 tunnel_key_copy_geneve_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
89 struct netlink_ext_ack
*extack
)
91 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1];
92 int err
, data_len
, opt_len
;
95 err
= nla_parse_nested_deprecated(tb
,
96 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
,
97 nla
, geneve_opt_policy
, extack
);
101 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] ||
102 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] ||
103 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]) {
104 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
108 data
= nla_data(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
109 data_len
= nla_len(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
111 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
115 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
119 opt_len
= sizeof(struct geneve_opt
) + data_len
;
121 struct geneve_opt
*opt
= dst
;
123 WARN_ON(dst_len
< opt_len
);
126 nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
]);
127 opt
->type
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
]);
128 opt
->length
= data_len
/ 4; /* length is in units of 4 bytes */
133 memcpy(opt
+ 1, data
, data_len
);
140 tunnel_key_copy_vxlan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
141 struct netlink_ext_ack
*extack
)
143 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1];
146 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
, nla
,
147 vxlan_opt_policy
, extack
);
151 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]) {
152 NL_SET_ERR_MSG(extack
, "Missing tunnel key vxlan option gbp");
157 struct vxlan_metadata
*md
= dst
;
159 md
->gbp
= nla_get_u32(tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]);
160 md
->gbp
&= VXLAN_GBP_MASK
;
163 return sizeof(struct vxlan_metadata
);
167 tunnel_key_copy_erspan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
168 struct netlink_ext_ack
*extack
)
170 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1];
174 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
, nla
,
175 erspan_opt_policy
, extack
);
179 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]) {
180 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option ver");
184 ver
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]);
186 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
]) {
187 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option index");
190 } else if (ver
== 2) {
191 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] ||
192 !tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
]) {
193 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option dir or hwid");
197 NL_SET_ERR_MSG(extack
, "Tunnel key erspan option ver is incorrect");
202 struct erspan_metadata
*md
= dst
;
206 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
];
207 md
->u
.index
= nla_get_be32(nla
);
209 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
];
210 md
->u
.md2
.dir
= nla_get_u8(nla
);
211 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
];
212 set_hwid(&md
->u
.md2
, nla_get_u8(nla
));
216 return sizeof(struct erspan_metadata
);
219 static int tunnel_key_copy_opts(const struct nlattr
*nla
, u8
*dst
,
220 int dst_len
, struct netlink_ext_ack
*extack
)
222 int err
, rem
, opt_len
, len
= nla_len(nla
), opts_len
= 0, type
= 0;
223 const struct nlattr
*attr
, *head
= nla_data(nla
);
225 err
= nla_validate_deprecated(head
, len
, TCA_TUNNEL_KEY_ENC_OPTS_MAX
,
226 enc_opts_policy
, extack
);
230 nla_for_each_attr(attr
, head
, len
, rem
) {
231 switch (nla_type(attr
)) {
232 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
233 if (type
&& type
!= IP_TUNNEL_GENEVE_OPT_BIT
) {
234 NL_SET_ERR_MSG(extack
, "Duplicate type for geneve options");
237 opt_len
= tunnel_key_copy_geneve_opt(attr
, dst
,
242 if (opts_len
> IP_TUNNEL_OPTS_MAX
) {
243 NL_SET_ERR_MSG(extack
, "Tunnel options exceeds max size");
250 type
= IP_TUNNEL_GENEVE_OPT_BIT
;
252 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
254 NL_SET_ERR_MSG(extack
, "Duplicate type for vxlan options");
257 opt_len
= tunnel_key_copy_vxlan_opt(attr
, dst
,
262 type
= IP_TUNNEL_VXLAN_OPT_BIT
;
264 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
266 NL_SET_ERR_MSG(extack
, "Duplicate type for erspan options");
269 opt_len
= tunnel_key_copy_erspan_opt(attr
, dst
,
274 type
= IP_TUNNEL_ERSPAN_OPT_BIT
;
280 NL_SET_ERR_MSG(extack
, "Empty list of tunnel options");
285 NL_SET_ERR_MSG(extack
, "Trailing data after parsing tunnel key options attributes");
292 static int tunnel_key_get_opts_len(struct nlattr
*nla
,
293 struct netlink_ext_ack
*extack
)
295 return tunnel_key_copy_opts(nla
, NULL
, 0, extack
);
298 static int tunnel_key_opts_set(struct nlattr
*nla
, struct ip_tunnel_info
*info
,
299 int opts_len
, struct netlink_ext_ack
*extack
)
301 info
->options_len
= opts_len
;
302 switch (nla_type(nla_data(nla
))) {
303 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
304 #if IS_ENABLED(CONFIG_INET)
305 __set_bit(IP_TUNNEL_GENEVE_OPT_BIT
, info
->key
.tun_flags
);
306 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
309 return -EAFNOSUPPORT
;
311 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
312 #if IS_ENABLED(CONFIG_INET)
313 __set_bit(IP_TUNNEL_VXLAN_OPT_BIT
, info
->key
.tun_flags
);
314 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
317 return -EAFNOSUPPORT
;
319 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
320 #if IS_ENABLED(CONFIG_INET)
321 __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT
, info
->key
.tun_flags
);
322 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
325 return -EAFNOSUPPORT
;
328 NL_SET_ERR_MSG(extack
, "Cannot set tunnel options for unknown tunnel type");
333 static const struct nla_policy tunnel_key_policy
[TCA_TUNNEL_KEY_MAX
+ 1] = {
334 [TCA_TUNNEL_KEY_PARMS
] = { .len
= sizeof(struct tc_tunnel_key
) },
335 [TCA_TUNNEL_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
336 [TCA_TUNNEL_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
337 [TCA_TUNNEL_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
338 [TCA_TUNNEL_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
339 [TCA_TUNNEL_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
340 [TCA_TUNNEL_KEY_ENC_DST_PORT
] = {.type
= NLA_U16
},
341 [TCA_TUNNEL_KEY_NO_CSUM
] = { .type
= NLA_U8
},
342 [TCA_TUNNEL_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
343 [TCA_TUNNEL_KEY_ENC_TOS
] = { .type
= NLA_U8
},
344 [TCA_TUNNEL_KEY_ENC_TTL
] = { .type
= NLA_U8
},
347 static void tunnel_key_release_params(struct tcf_tunnel_key_params
*p
)
351 if (p
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
)
352 dst_release(&p
->tcft_enc_metadata
->dst
);
357 static int tunnel_key_init(struct net
*net
, struct nlattr
*nla
,
358 struct nlattr
*est
, struct tc_action
**a
,
359 struct tcf_proto
*tp
, u32 act_flags
,
360 struct netlink_ext_ack
*extack
)
362 struct tc_action_net
*tn
= net_generic(net
, act_tunnel_key_ops
.net_id
);
363 bool bind
= act_flags
& TCA_ACT_FLAGS_BIND
;
364 struct nlattr
*tb
[TCA_TUNNEL_KEY_MAX
+ 1];
365 struct tcf_tunnel_key_params
*params_new
;
366 IP_TUNNEL_DECLARE_FLAGS(flags
) = { };
367 struct metadata_dst
*metadata
= NULL
;
368 struct tcf_chain
*goto_ch
= NULL
;
369 struct tc_tunnel_key
*parm
;
370 struct tcf_tunnel_key
*t
;
381 NL_SET_ERR_MSG(extack
, "Tunnel requires attributes to be passed");
385 err
= nla_parse_nested_deprecated(tb
, TCA_TUNNEL_KEY_MAX
, nla
,
386 tunnel_key_policy
, extack
);
388 NL_SET_ERR_MSG(extack
, "Failed to parse nested tunnel key attributes");
392 if (!tb
[TCA_TUNNEL_KEY_PARMS
]) {
393 NL_SET_ERR_MSG(extack
, "Missing tunnel key parameters");
397 parm
= nla_data(tb
[TCA_TUNNEL_KEY_PARMS
]);
399 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
406 switch (parm
->t_action
) {
407 case TCA_TUNNEL_KEY_ACT_RELEASE
:
409 case TCA_TUNNEL_KEY_ACT_SET
:
410 if (tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]) {
413 key32
= nla_get_be32(tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]);
414 key_id
= key32_to_tunnel_id(key32
);
415 __set_bit(IP_TUNNEL_KEY_BIT
, flags
);
418 __set_bit(IP_TUNNEL_CSUM_BIT
, flags
);
419 if (tb
[TCA_TUNNEL_KEY_NO_CSUM
] &&
420 nla_get_u8(tb
[TCA_TUNNEL_KEY_NO_CSUM
]))
421 __clear_bit(IP_TUNNEL_CSUM_BIT
, flags
);
423 if (nla_get_flag(tb
[TCA_TUNNEL_KEY_NO_FRAG
]))
424 __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT
, flags
);
426 if (tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
])
427 dst_port
= nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
]);
429 if (tb
[TCA_TUNNEL_KEY_ENC_OPTS
]) {
430 opts_len
= tunnel_key_get_opts_len(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
439 if (tb
[TCA_TUNNEL_KEY_ENC_TOS
])
440 tos
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TOS
]);
442 if (tb
[TCA_TUNNEL_KEY_ENC_TTL
])
443 ttl
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TTL
]);
445 if (tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
] &&
446 tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]) {
450 saddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
]);
451 daddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]);
453 metadata
= __ip_tun_set_dst(saddr
, daddr
, tos
, ttl
,
456 } else if (tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
] &&
457 tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]) {
458 struct in6_addr saddr
;
459 struct in6_addr daddr
;
461 saddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
]);
462 daddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]);
464 metadata
= __ipv6_tun_set_dst(&saddr
, &daddr
, tos
, ttl
, dst_port
,
468 NL_SET_ERR_MSG(extack
, "Missing either ipv4 or ipv6 src and dst");
474 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel metadata dst");
479 #ifdef CONFIG_DST_CACHE
480 ret
= dst_cache_init(&metadata
->u
.tun_info
.dst_cache
, GFP_KERNEL
);
482 goto release_tun_meta
;
486 ret
= tunnel_key_opts_set(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
487 &metadata
->u
.tun_info
,
490 goto release_tun_meta
;
493 metadata
->u
.tun_info
.mode
|= IP_TUNNEL_INFO_TX
;
496 NL_SET_ERR_MSG(extack
, "Unknown tunnel key action");
502 ret
= tcf_idr_create_from_flags(tn
, index
, est
, a
,
503 &act_tunnel_key_ops
, bind
,
506 NL_SET_ERR_MSG(extack
, "Cannot create TC IDR");
507 goto release_tun_meta
;
511 } else if (!(act_flags
& TCA_ACT_FLAGS_REPLACE
)) {
512 NL_SET_ERR_MSG(extack
, "TC IDR already exists");
514 goto release_tun_meta
;
517 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
521 goto release_tun_meta
;
523 t
= to_tunnel_key(*a
);
525 params_new
= kzalloc(sizeof(*params_new
), GFP_KERNEL
);
526 if (unlikely(!params_new
)) {
527 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel key parameters");
532 params_new
->tcft_action
= parm
->t_action
;
533 params_new
->tcft_enc_metadata
= metadata
;
535 spin_lock_bh(&t
->tcf_lock
);
536 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
537 params_new
= rcu_replace_pointer(t
->params
, params_new
,
538 lockdep_is_held(&t
->tcf_lock
));
539 spin_unlock_bh(&t
->tcf_lock
);
540 tunnel_key_release_params(params_new
);
542 tcf_chain_put_by_act(goto_ch
);
548 tcf_chain_put_by_act(goto_ch
);
552 dst_release(&metadata
->dst
);
556 tcf_idr_release(*a
, bind
);
558 tcf_idr_cleanup(tn
, index
);
562 static void tunnel_key_release(struct tc_action
*a
)
564 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
565 struct tcf_tunnel_key_params
*params
;
567 params
= rcu_dereference_protected(t
->params
, 1);
568 tunnel_key_release_params(params
);
571 static int tunnel_key_geneve_opts_dump(struct sk_buff
*skb
,
572 const struct ip_tunnel_info
*info
)
574 int len
= info
->options_len
;
575 u8
*src
= (u8
*)(info
+ 1);
576 struct nlattr
*start
;
578 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
);
583 struct geneve_opt
*opt
= (struct geneve_opt
*)src
;
585 if (nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
,
587 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
,
589 nla_put(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
,
590 opt
->length
* 4, opt
+ 1)) {
591 nla_nest_cancel(skb
, start
);
595 len
-= sizeof(struct geneve_opt
) + opt
->length
* 4;
596 src
+= sizeof(struct geneve_opt
) + opt
->length
* 4;
599 nla_nest_end(skb
, start
);
603 static int tunnel_key_vxlan_opts_dump(struct sk_buff
*skb
,
604 const struct ip_tunnel_info
*info
)
606 struct vxlan_metadata
*md
= (struct vxlan_metadata
*)(info
+ 1);
607 struct nlattr
*start
;
609 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
);
613 if (nla_put_u32(skb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
, md
->gbp
)) {
614 nla_nest_cancel(skb
, start
);
618 nla_nest_end(skb
, start
);
622 static int tunnel_key_erspan_opts_dump(struct sk_buff
*skb
,
623 const struct ip_tunnel_info
*info
)
625 struct erspan_metadata
*md
= (struct erspan_metadata
*)(info
+ 1);
626 struct nlattr
*start
;
628 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
);
632 if (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
, md
->version
))
635 if (md
->version
== 1 &&
636 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
, md
->u
.index
))
639 if (md
->version
== 2 &&
640 (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
,
642 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
,
643 get_hwid(&md
->u
.md2
))))
646 nla_nest_end(skb
, start
);
649 nla_nest_cancel(skb
, start
);
653 static int tunnel_key_opts_dump(struct sk_buff
*skb
,
654 const struct ip_tunnel_info
*info
)
656 struct nlattr
*start
;
659 if (!info
->options_len
)
662 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS
);
666 if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT
, info
->key
.tun_flags
)) {
667 err
= tunnel_key_geneve_opts_dump(skb
, info
);
670 } else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT
, info
->key
.tun_flags
)) {
671 err
= tunnel_key_vxlan_opts_dump(skb
, info
);
674 } else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT
, info
->key
.tun_flags
)) {
675 err
= tunnel_key_erspan_opts_dump(skb
, info
);
680 nla_nest_cancel(skb
, start
);
684 nla_nest_end(skb
, start
);
688 static int tunnel_key_dump_addresses(struct sk_buff
*skb
,
689 const struct ip_tunnel_info
*info
)
691 unsigned short family
= ip_tunnel_info_af(info
);
693 if (family
== AF_INET
) {
694 __be32 saddr
= info
->key
.u
.ipv4
.src
;
695 __be32 daddr
= info
->key
.u
.ipv4
.dst
;
697 if (!nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_SRC
, saddr
) &&
698 !nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_DST
, daddr
))
702 if (family
== AF_INET6
) {
703 const struct in6_addr
*saddr6
= &info
->key
.u
.ipv6
.src
;
704 const struct in6_addr
*daddr6
= &info
->key
.u
.ipv6
.dst
;
706 if (!nla_put_in6_addr(skb
,
707 TCA_TUNNEL_KEY_ENC_IPV6_SRC
, saddr6
) &&
708 !nla_put_in6_addr(skb
,
709 TCA_TUNNEL_KEY_ENC_IPV6_DST
, daddr6
))
716 static int tunnel_key_dump(struct sk_buff
*skb
, struct tc_action
*a
,
719 unsigned char *b
= skb_tail_pointer(skb
);
720 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
721 struct tcf_tunnel_key_params
*params
;
722 struct tc_tunnel_key opt
= {
723 .index
= t
->tcf_index
,
724 .refcnt
= refcount_read(&t
->tcf_refcnt
) - ref
,
725 .bindcnt
= atomic_read(&t
->tcf_bindcnt
) - bind
,
729 spin_lock_bh(&t
->tcf_lock
);
730 params
= rcu_dereference_protected(t
->params
,
731 lockdep_is_held(&t
->tcf_lock
));
732 opt
.action
= t
->tcf_action
;
733 opt
.t_action
= params
->tcft_action
;
735 if (nla_put(skb
, TCA_TUNNEL_KEY_PARMS
, sizeof(opt
), &opt
))
736 goto nla_put_failure
;
738 if (params
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
) {
739 struct ip_tunnel_info
*info
=
740 ¶ms
->tcft_enc_metadata
->u
.tun_info
;
741 struct ip_tunnel_key
*key
= &info
->key
;
742 __be32 key_id
= tunnel_id_to_key32(key
->tun_id
);
744 if ((test_bit(IP_TUNNEL_KEY_BIT
, key
->tun_flags
) &&
745 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_KEY_ID
, key_id
)) ||
746 tunnel_key_dump_addresses(skb
,
747 ¶ms
->tcft_enc_metadata
->u
.tun_info
) ||
749 nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_DST_PORT
,
751 nla_put_u8(skb
, TCA_TUNNEL_KEY_NO_CSUM
,
752 !test_bit(IP_TUNNEL_CSUM_BIT
, key
->tun_flags
)) ||
753 (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT
, key
->tun_flags
) &&
754 nla_put_flag(skb
, TCA_TUNNEL_KEY_NO_FRAG
)) ||
755 tunnel_key_opts_dump(skb
, info
))
756 goto nla_put_failure
;
758 if (key
->tos
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TOS
, key
->tos
))
759 goto nla_put_failure
;
761 if (key
->ttl
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TTL
, key
->ttl
))
762 goto nla_put_failure
;
765 tcf_tm_dump(&tm
, &t
->tcf_tm
);
766 if (nla_put_64bit(skb
, TCA_TUNNEL_KEY_TM
, sizeof(tm
),
767 &tm
, TCA_TUNNEL_KEY_PAD
))
768 goto nla_put_failure
;
769 spin_unlock_bh(&t
->tcf_lock
);
774 spin_unlock_bh(&t
->tcf_lock
);
/* Destructor for the tunnel info copied into a flow_action entry. */
static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}
786 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry
*entry
,
787 const struct tc_action
*act
)
789 entry
->tunnel
= tcf_tunnel_info_copy(act
);
792 entry
->destructor
= tcf_tunnel_encap_put_tunnel
;
793 entry
->destructor_priv
= entry
->tunnel
;
797 static int tcf_tunnel_key_offload_act_setup(struct tc_action
*act
,
801 struct netlink_ext_ack
*extack
)
806 struct flow_action_entry
*entry
= entry_data
;
808 if (is_tcf_tunnel_set(act
)) {
809 entry
->id
= FLOW_ACTION_TUNNEL_ENCAP
;
810 err
= tcf_tunnel_encap_get_tunnel(entry
, act
);
813 } else if (is_tcf_tunnel_release(act
)) {
814 entry
->id
= FLOW_ACTION_TUNNEL_DECAP
;
816 NL_SET_ERR_MSG_MOD(extack
, "Unsupported tunnel key mode offload");
821 struct flow_offload_action
*fl_action
= entry_data
;
823 if (is_tcf_tunnel_set(act
))
824 fl_action
->id
= FLOW_ACTION_TUNNEL_ENCAP
;
825 else if (is_tcf_tunnel_release(act
))
826 fl_action
->id
= FLOW_ACTION_TUNNEL_DECAP
;
834 static struct tc_action_ops act_tunnel_key_ops
= {
835 .kind
= "tunnel_key",
836 .id
= TCA_ID_TUNNEL_KEY
,
837 .owner
= THIS_MODULE
,
838 .act
= tunnel_key_act
,
839 .dump
= tunnel_key_dump
,
840 .init
= tunnel_key_init
,
841 .cleanup
= tunnel_key_release
,
842 .offload_act_setup
= tcf_tunnel_key_offload_act_setup
,
843 .size
= sizeof(struct tcf_tunnel_key
),
845 MODULE_ALIAS_NET_ACT("tunnel_key");
847 static __net_init
int tunnel_key_init_net(struct net
*net
)
849 struct tc_action_net
*tn
= net_generic(net
, act_tunnel_key_ops
.net_id
);
851 return tc_action_net_init(net
, tn
, &act_tunnel_key_ops
);
854 static void __net_exit
tunnel_key_exit_net(struct list_head
*net_list
)
856 tc_action_net_exit(net_list
, act_tunnel_key_ops
.net_id
);
859 static struct pernet_operations tunnel_key_net_ops
= {
860 .init
= tunnel_key_init_net
,
861 .exit_batch
= tunnel_key_exit_net
,
862 .id
= &act_tunnel_key_ops
.net_id
,
863 .size
= sizeof(struct tc_action_net
),
866 static int __init
tunnel_key_init_module(void)
868 return tcf_register_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
871 static void __exit
tunnel_key_cleanup_module(void)
873 tcf_unregister_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
876 module_init(tunnel_key_init_module
);
877 module_exit(tunnel_key_cleanup_module
);
879 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
880 MODULE_DESCRIPTION("ip tunnel manipulation actions");
881 MODULE_LICENSE("GPL v2");