1 // SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>
23 static unsigned int tunnel_key_net_id
;
24 static struct tc_action_ops act_tunnel_key_ops
;
26 static int tunnel_key_act(struct sk_buff
*skb
, const struct tc_action
*a
,
27 struct tcf_result
*res
)
29 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
30 struct tcf_tunnel_key_params
*params
;
33 params
= rcu_dereference_bh(t
->params
);
35 tcf_lastuse_update(&t
->tcf_tm
);
36 tcf_action_update_bstats(&t
->common
, skb
);
37 action
= READ_ONCE(t
->tcf_action
);
39 switch (params
->tcft_action
) {
40 case TCA_TUNNEL_KEY_ACT_RELEASE
:
43 case TCA_TUNNEL_KEY_ACT_SET
:
45 skb_dst_set(skb
, dst_clone(¶ms
->tcft_enc_metadata
->dst
));
48 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
56 static const struct nla_policy
57 enc_opts_policy
[TCA_TUNNEL_KEY_ENC_OPTS_MAX
+ 1] = {
58 [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC
] = {
59 .strict_start_type
= TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
},
60 [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
] = { .type
= NLA_NESTED
},
61 [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
] = { .type
= NLA_NESTED
},
62 [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
] = { .type
= NLA_NESTED
},
65 static const struct nla_policy
66 geneve_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1] = {
67 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] = { .type
= NLA_U16
},
68 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] = { .type
= NLA_U8
},
69 [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
] = { .type
= NLA_BINARY
,
73 static const struct nla_policy
74 vxlan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1] = {
75 [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
] = { .type
= NLA_U32
},
78 static const struct nla_policy
79 erspan_opt_policy
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1] = {
80 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
] = { .type
= NLA_U8
},
81 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
] = { .type
= NLA_U32
},
82 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] = { .type
= NLA_U8
},
83 [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
] = { .type
= NLA_U8
},
87 tunnel_key_copy_geneve_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
88 struct netlink_ext_ack
*extack
)
90 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
+ 1];
91 int err
, data_len
, opt_len
;
94 err
= nla_parse_nested_deprecated(tb
,
95 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX
,
96 nla
, geneve_opt_policy
, extack
);
100 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
] ||
101 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
] ||
102 !tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]) {
103 NL_SET_ERR_MSG(extack
, "Missing tunnel key geneve option class, type or data");
107 data
= nla_data(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
108 data_len
= nla_len(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
]);
110 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is less than 4 bytes long");
114 NL_SET_ERR_MSG(extack
, "Tunnel key geneve option data is not a multiple of 4 bytes long");
118 opt_len
= sizeof(struct geneve_opt
) + data_len
;
120 struct geneve_opt
*opt
= dst
;
122 WARN_ON(dst_len
< opt_len
);
125 nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
]);
126 opt
->type
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
]);
127 opt
->length
= data_len
/ 4; /* length is in units of 4 bytes */
132 memcpy(opt
+ 1, data
, data_len
);
139 tunnel_key_copy_vxlan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
140 struct netlink_ext_ack
*extack
)
142 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
+ 1];
145 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX
, nla
,
146 vxlan_opt_policy
, extack
);
150 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]) {
151 NL_SET_ERR_MSG(extack
, "Missing tunnel key vxlan option gbp");
156 struct vxlan_metadata
*md
= dst
;
158 md
->gbp
= nla_get_u32(tb
[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
]);
161 return sizeof(struct vxlan_metadata
);
165 tunnel_key_copy_erspan_opt(const struct nlattr
*nla
, void *dst
, int dst_len
,
166 struct netlink_ext_ack
*extack
)
168 struct nlattr
*tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
+ 1];
172 err
= nla_parse_nested(tb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX
, nla
,
173 erspan_opt_policy
, extack
);
177 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]) {
178 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option ver");
182 ver
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
]);
184 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
]) {
185 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option index");
188 } else if (ver
== 2) {
189 if (!tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
] ||
190 !tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
]) {
191 NL_SET_ERR_MSG(extack
, "Missing tunnel key erspan option dir or hwid");
195 NL_SET_ERR_MSG(extack
, "Tunnel key erspan option ver is incorrect");
200 struct erspan_metadata
*md
= dst
;
204 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
];
205 md
->u
.index
= nla_get_be32(nla
);
207 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
];
208 md
->u
.md2
.dir
= nla_get_u8(nla
);
209 nla
= tb
[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
];
210 set_hwid(&md
->u
.md2
, nla_get_u8(nla
));
214 return sizeof(struct erspan_metadata
);
217 static int tunnel_key_copy_opts(const struct nlattr
*nla
, u8
*dst
,
218 int dst_len
, struct netlink_ext_ack
*extack
)
220 int err
, rem
, opt_len
, len
= nla_len(nla
), opts_len
= 0, type
= 0;
221 const struct nlattr
*attr
, *head
= nla_data(nla
);
223 err
= nla_validate_deprecated(head
, len
, TCA_TUNNEL_KEY_ENC_OPTS_MAX
,
224 enc_opts_policy
, extack
);
228 nla_for_each_attr(attr
, head
, len
, rem
) {
229 switch (nla_type(attr
)) {
230 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
231 if (type
&& type
!= TUNNEL_GENEVE_OPT
) {
232 NL_SET_ERR_MSG(extack
, "Duplicate type for geneve options");
235 opt_len
= tunnel_key_copy_geneve_opt(attr
, dst
,
240 if (opts_len
> IP_TUNNEL_OPTS_MAX
) {
241 NL_SET_ERR_MSG(extack
, "Tunnel options exceeds max size");
248 type
= TUNNEL_GENEVE_OPT
;
250 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
252 NL_SET_ERR_MSG(extack
, "Duplicate type for vxlan options");
255 opt_len
= tunnel_key_copy_vxlan_opt(attr
, dst
,
260 type
= TUNNEL_VXLAN_OPT
;
262 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
264 NL_SET_ERR_MSG(extack
, "Duplicate type for erspan options");
267 opt_len
= tunnel_key_copy_erspan_opt(attr
, dst
,
272 type
= TUNNEL_ERSPAN_OPT
;
278 NL_SET_ERR_MSG(extack
, "Empty list of tunnel options");
283 NL_SET_ERR_MSG(extack
, "Trailing data after parsing tunnel key options attributes");
290 static int tunnel_key_get_opts_len(struct nlattr
*nla
,
291 struct netlink_ext_ack
*extack
)
293 return tunnel_key_copy_opts(nla
, NULL
, 0, extack
);
296 static int tunnel_key_opts_set(struct nlattr
*nla
, struct ip_tunnel_info
*info
,
297 int opts_len
, struct netlink_ext_ack
*extack
)
299 info
->options_len
= opts_len
;
300 switch (nla_type(nla_data(nla
))) {
301 case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
:
302 #if IS_ENABLED(CONFIG_INET)
303 info
->key
.tun_flags
|= TUNNEL_GENEVE_OPT
;
304 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
307 return -EAFNOSUPPORT
;
309 case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
:
310 #if IS_ENABLED(CONFIG_INET)
311 info
->key
.tun_flags
|= TUNNEL_VXLAN_OPT
;
312 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
315 return -EAFNOSUPPORT
;
317 case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
:
318 #if IS_ENABLED(CONFIG_INET)
319 info
->key
.tun_flags
|= TUNNEL_ERSPAN_OPT
;
320 return tunnel_key_copy_opts(nla
, ip_tunnel_info_opts(info
),
323 return -EAFNOSUPPORT
;
326 NL_SET_ERR_MSG(extack
, "Cannot set tunnel options for unknown tunnel type");
331 static const struct nla_policy tunnel_key_policy
[TCA_TUNNEL_KEY_MAX
+ 1] = {
332 [TCA_TUNNEL_KEY_PARMS
] = { .len
= sizeof(struct tc_tunnel_key
) },
333 [TCA_TUNNEL_KEY_ENC_IPV4_SRC
] = { .type
= NLA_U32
},
334 [TCA_TUNNEL_KEY_ENC_IPV4_DST
] = { .type
= NLA_U32
},
335 [TCA_TUNNEL_KEY_ENC_IPV6_SRC
] = { .len
= sizeof(struct in6_addr
) },
336 [TCA_TUNNEL_KEY_ENC_IPV6_DST
] = { .len
= sizeof(struct in6_addr
) },
337 [TCA_TUNNEL_KEY_ENC_KEY_ID
] = { .type
= NLA_U32
},
338 [TCA_TUNNEL_KEY_ENC_DST_PORT
] = {.type
= NLA_U16
},
339 [TCA_TUNNEL_KEY_NO_CSUM
] = { .type
= NLA_U8
},
340 [TCA_TUNNEL_KEY_ENC_OPTS
] = { .type
= NLA_NESTED
},
341 [TCA_TUNNEL_KEY_ENC_TOS
] = { .type
= NLA_U8
},
342 [TCA_TUNNEL_KEY_ENC_TTL
] = { .type
= NLA_U8
},
345 static void tunnel_key_release_params(struct tcf_tunnel_key_params
*p
)
349 if (p
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
)
350 dst_release(&p
->tcft_enc_metadata
->dst
);
355 static int tunnel_key_init(struct net
*net
, struct nlattr
*nla
,
356 struct nlattr
*est
, struct tc_action
**a
,
357 int ovr
, int bind
, bool rtnl_held
,
358 struct tcf_proto
*tp
, u32 act_flags
,
359 struct netlink_ext_ack
*extack
)
361 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
362 struct nlattr
*tb
[TCA_TUNNEL_KEY_MAX
+ 1];
363 struct tcf_tunnel_key_params
*params_new
;
364 struct metadata_dst
*metadata
= NULL
;
365 struct tcf_chain
*goto_ch
= NULL
;
366 struct tc_tunnel_key
*parm
;
367 struct tcf_tunnel_key
*t
;
379 NL_SET_ERR_MSG(extack
, "Tunnel requires attributes to be passed");
383 err
= nla_parse_nested_deprecated(tb
, TCA_TUNNEL_KEY_MAX
, nla
,
384 tunnel_key_policy
, extack
);
386 NL_SET_ERR_MSG(extack
, "Failed to parse nested tunnel key attributes");
390 if (!tb
[TCA_TUNNEL_KEY_PARMS
]) {
391 NL_SET_ERR_MSG(extack
, "Missing tunnel key parameters");
395 parm
= nla_data(tb
[TCA_TUNNEL_KEY_PARMS
]);
397 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
404 switch (parm
->t_action
) {
405 case TCA_TUNNEL_KEY_ACT_RELEASE
:
407 case TCA_TUNNEL_KEY_ACT_SET
:
408 if (tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]) {
411 key32
= nla_get_be32(tb
[TCA_TUNNEL_KEY_ENC_KEY_ID
]);
412 key_id
= key32_to_tunnel_id(key32
);
416 flags
|= TUNNEL_CSUM
;
417 if (tb
[TCA_TUNNEL_KEY_NO_CSUM
] &&
418 nla_get_u8(tb
[TCA_TUNNEL_KEY_NO_CSUM
]))
419 flags
&= ~TUNNEL_CSUM
;
421 if (tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
])
422 dst_port
= nla_get_be16(tb
[TCA_TUNNEL_KEY_ENC_DST_PORT
]);
424 if (tb
[TCA_TUNNEL_KEY_ENC_OPTS
]) {
425 opts_len
= tunnel_key_get_opts_len(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
434 if (tb
[TCA_TUNNEL_KEY_ENC_TOS
])
435 tos
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TOS
]);
437 if (tb
[TCA_TUNNEL_KEY_ENC_TTL
])
438 ttl
= nla_get_u8(tb
[TCA_TUNNEL_KEY_ENC_TTL
]);
440 if (tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
] &&
441 tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]) {
445 saddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_SRC
]);
446 daddr
= nla_get_in_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV4_DST
]);
448 metadata
= __ip_tun_set_dst(saddr
, daddr
, tos
, ttl
,
451 } else if (tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
] &&
452 tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]) {
453 struct in6_addr saddr
;
454 struct in6_addr daddr
;
456 saddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_SRC
]);
457 daddr
= nla_get_in6_addr(tb
[TCA_TUNNEL_KEY_ENC_IPV6_DST
]);
459 metadata
= __ipv6_tun_set_dst(&saddr
, &daddr
, tos
, ttl
, dst_port
,
463 NL_SET_ERR_MSG(extack
, "Missing either ipv4 or ipv6 src and dst");
469 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel metadata dst");
474 #ifdef CONFIG_DST_CACHE
475 ret
= dst_cache_init(&metadata
->u
.tun_info
.dst_cache
, GFP_KERNEL
);
477 goto release_tun_meta
;
481 ret
= tunnel_key_opts_set(tb
[TCA_TUNNEL_KEY_ENC_OPTS
],
482 &metadata
->u
.tun_info
,
485 goto release_tun_meta
;
488 metadata
->u
.tun_info
.mode
|= IP_TUNNEL_INFO_TX
;
491 NL_SET_ERR_MSG(extack
, "Unknown tunnel key action");
497 ret
= tcf_idr_create_from_flags(tn
, index
, est
, a
,
498 &act_tunnel_key_ops
, bind
,
501 NL_SET_ERR_MSG(extack
, "Cannot create TC IDR");
502 goto release_tun_meta
;
507 NL_SET_ERR_MSG(extack
, "TC IDR already exists");
509 goto release_tun_meta
;
512 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
516 goto release_tun_meta
;
518 t
= to_tunnel_key(*a
);
520 params_new
= kzalloc(sizeof(*params_new
), GFP_KERNEL
);
521 if (unlikely(!params_new
)) {
522 NL_SET_ERR_MSG(extack
, "Cannot allocate tunnel key parameters");
527 params_new
->tcft_action
= parm
->t_action
;
528 params_new
->tcft_enc_metadata
= metadata
;
530 spin_lock_bh(&t
->tcf_lock
);
531 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
532 params_new
= rcu_replace_pointer(t
->params
, params_new
,
533 lockdep_is_held(&t
->tcf_lock
));
534 spin_unlock_bh(&t
->tcf_lock
);
535 tunnel_key_release_params(params_new
);
537 tcf_chain_put_by_act(goto_ch
);
539 if (ret
== ACT_P_CREATED
)
540 tcf_idr_insert(tn
, *a
);
546 tcf_chain_put_by_act(goto_ch
);
550 dst_release(&metadata
->dst
);
554 tcf_idr_release(*a
, bind
);
556 tcf_idr_cleanup(tn
, index
);
560 static void tunnel_key_release(struct tc_action
*a
)
562 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
563 struct tcf_tunnel_key_params
*params
;
565 params
= rcu_dereference_protected(t
->params
, 1);
566 tunnel_key_release_params(params
);
569 static int tunnel_key_geneve_opts_dump(struct sk_buff
*skb
,
570 const struct ip_tunnel_info
*info
)
572 int len
= info
->options_len
;
573 u8
*src
= (u8
*)(info
+ 1);
574 struct nlattr
*start
;
576 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE
);
581 struct geneve_opt
*opt
= (struct geneve_opt
*)src
;
583 if (nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS
,
585 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE
,
587 nla_put(skb
, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA
,
588 opt
->length
* 4, opt
+ 1)) {
589 nla_nest_cancel(skb
, start
);
593 len
-= sizeof(struct geneve_opt
) + opt
->length
* 4;
594 src
+= sizeof(struct geneve_opt
) + opt
->length
* 4;
597 nla_nest_end(skb
, start
);
601 static int tunnel_key_vxlan_opts_dump(struct sk_buff
*skb
,
602 const struct ip_tunnel_info
*info
)
604 struct vxlan_metadata
*md
= (struct vxlan_metadata
*)(info
+ 1);
605 struct nlattr
*start
;
607 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN
);
611 if (nla_put_u32(skb
, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP
, md
->gbp
)) {
612 nla_nest_cancel(skb
, start
);
616 nla_nest_end(skb
, start
);
620 static int tunnel_key_erspan_opts_dump(struct sk_buff
*skb
,
621 const struct ip_tunnel_info
*info
)
623 struct erspan_metadata
*md
= (struct erspan_metadata
*)(info
+ 1);
624 struct nlattr
*start
;
626 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN
);
630 if (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER
, md
->version
))
633 if (md
->version
== 1 &&
634 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX
, md
->u
.index
))
637 if (md
->version
== 2 &&
638 (nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR
,
640 nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID
,
641 get_hwid(&md
->u
.md2
))))
644 nla_nest_end(skb
, start
);
647 nla_nest_cancel(skb
, start
);
651 static int tunnel_key_opts_dump(struct sk_buff
*skb
,
652 const struct ip_tunnel_info
*info
)
654 struct nlattr
*start
;
657 if (!info
->options_len
)
660 start
= nla_nest_start_noflag(skb
, TCA_TUNNEL_KEY_ENC_OPTS
);
664 if (info
->key
.tun_flags
& TUNNEL_GENEVE_OPT
) {
665 err
= tunnel_key_geneve_opts_dump(skb
, info
);
668 } else if (info
->key
.tun_flags
& TUNNEL_VXLAN_OPT
) {
669 err
= tunnel_key_vxlan_opts_dump(skb
, info
);
672 } else if (info
->key
.tun_flags
& TUNNEL_ERSPAN_OPT
) {
673 err
= tunnel_key_erspan_opts_dump(skb
, info
);
678 nla_nest_cancel(skb
, start
);
682 nla_nest_end(skb
, start
);
686 static int tunnel_key_dump_addresses(struct sk_buff
*skb
,
687 const struct ip_tunnel_info
*info
)
689 unsigned short family
= ip_tunnel_info_af(info
);
691 if (family
== AF_INET
) {
692 __be32 saddr
= info
->key
.u
.ipv4
.src
;
693 __be32 daddr
= info
->key
.u
.ipv4
.dst
;
695 if (!nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_SRC
, saddr
) &&
696 !nla_put_in_addr(skb
, TCA_TUNNEL_KEY_ENC_IPV4_DST
, daddr
))
700 if (family
== AF_INET6
) {
701 const struct in6_addr
*saddr6
= &info
->key
.u
.ipv6
.src
;
702 const struct in6_addr
*daddr6
= &info
->key
.u
.ipv6
.dst
;
704 if (!nla_put_in6_addr(skb
,
705 TCA_TUNNEL_KEY_ENC_IPV6_SRC
, saddr6
) &&
706 !nla_put_in6_addr(skb
,
707 TCA_TUNNEL_KEY_ENC_IPV6_DST
, daddr6
))
714 static int tunnel_key_dump(struct sk_buff
*skb
, struct tc_action
*a
,
717 unsigned char *b
= skb_tail_pointer(skb
);
718 struct tcf_tunnel_key
*t
= to_tunnel_key(a
);
719 struct tcf_tunnel_key_params
*params
;
720 struct tc_tunnel_key opt
= {
721 .index
= t
->tcf_index
,
722 .refcnt
= refcount_read(&t
->tcf_refcnt
) - ref
,
723 .bindcnt
= atomic_read(&t
->tcf_bindcnt
) - bind
,
727 spin_lock_bh(&t
->tcf_lock
);
728 params
= rcu_dereference_protected(t
->params
,
729 lockdep_is_held(&t
->tcf_lock
));
730 opt
.action
= t
->tcf_action
;
731 opt
.t_action
= params
->tcft_action
;
733 if (nla_put(skb
, TCA_TUNNEL_KEY_PARMS
, sizeof(opt
), &opt
))
734 goto nla_put_failure
;
736 if (params
->tcft_action
== TCA_TUNNEL_KEY_ACT_SET
) {
737 struct ip_tunnel_info
*info
=
738 ¶ms
->tcft_enc_metadata
->u
.tun_info
;
739 struct ip_tunnel_key
*key
= &info
->key
;
740 __be32 key_id
= tunnel_id_to_key32(key
->tun_id
);
742 if (((key
->tun_flags
& TUNNEL_KEY
) &&
743 nla_put_be32(skb
, TCA_TUNNEL_KEY_ENC_KEY_ID
, key_id
)) ||
744 tunnel_key_dump_addresses(skb
,
745 ¶ms
->tcft_enc_metadata
->u
.tun_info
) ||
747 nla_put_be16(skb
, TCA_TUNNEL_KEY_ENC_DST_PORT
,
749 nla_put_u8(skb
, TCA_TUNNEL_KEY_NO_CSUM
,
750 !(key
->tun_flags
& TUNNEL_CSUM
)) ||
751 tunnel_key_opts_dump(skb
, info
))
752 goto nla_put_failure
;
754 if (key
->tos
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TOS
, key
->tos
))
755 goto nla_put_failure
;
757 if (key
->ttl
&& nla_put_u8(skb
, TCA_TUNNEL_KEY_ENC_TTL
, key
->ttl
))
758 goto nla_put_failure
;
761 tcf_tm_dump(&tm
, &t
->tcf_tm
);
762 if (nla_put_64bit(skb
, TCA_TUNNEL_KEY_TM
, sizeof(tm
),
763 &tm
, TCA_TUNNEL_KEY_PAD
))
764 goto nla_put_failure
;
765 spin_unlock_bh(&t
->tcf_lock
);
770 spin_unlock_bh(&t
->tcf_lock
);
775 static int tunnel_key_walker(struct net
*net
, struct sk_buff
*skb
,
776 struct netlink_callback
*cb
, int type
,
777 const struct tc_action_ops
*ops
,
778 struct netlink_ext_ack
*extack
)
780 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
782 return tcf_generic_walker(tn
, skb
, cb
, type
, ops
, extack
);
785 static int tunnel_key_search(struct net
*net
, struct tc_action
**a
, u32 index
)
787 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
789 return tcf_idr_search(tn
, a
, index
);
792 static struct tc_action_ops act_tunnel_key_ops
= {
793 .kind
= "tunnel_key",
794 .id
= TCA_ID_TUNNEL_KEY
,
795 .owner
= THIS_MODULE
,
796 .act
= tunnel_key_act
,
797 .dump
= tunnel_key_dump
,
798 .init
= tunnel_key_init
,
799 .cleanup
= tunnel_key_release
,
800 .walk
= tunnel_key_walker
,
801 .lookup
= tunnel_key_search
,
802 .size
= sizeof(struct tcf_tunnel_key
),
805 static __net_init
int tunnel_key_init_net(struct net
*net
)
807 struct tc_action_net
*tn
= net_generic(net
, tunnel_key_net_id
);
809 return tc_action_net_init(net
, tn
, &act_tunnel_key_ops
);
812 static void __net_exit
tunnel_key_exit_net(struct list_head
*net_list
)
814 tc_action_net_exit(net_list
, tunnel_key_net_id
);
817 static struct pernet_operations tunnel_key_net_ops
= {
818 .init
= tunnel_key_init_net
,
819 .exit_batch
= tunnel_key_exit_net
,
820 .id
= &tunnel_key_net_id
,
821 .size
= sizeof(struct tc_action_net
),
824 static int __init
tunnel_key_init_module(void)
826 return tcf_register_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
829 static void __exit
tunnel_key_cleanup_module(void)
831 tcf_unregister_action(&act_tunnel_key_ops
, &tunnel_key_net_ops
);
834 module_init(tunnel_key_init_module
);
835 module_exit(tunnel_key_cleanup_module
);
837 MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
838 MODULE_DESCRIPTION("ip tunnel manipulation actions");
839 MODULE_LICENSE("GPL v2");