/* SPDX-License-Identifier: GPL-2.0 */
/* net/netfilter/nft_tunnel.c (Linux 4.19.133) */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>

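/*
 * "tunnel" expression: copies tunnel metadata from the packet's tunnel
 * dst entry (if any) into a destination register, either the presence
 * of a tunnel path or the tunnel id.
 */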
struct nft_tunnel {
        enum nft_tunnel_keys    key:8;
        enum nft_registers      dreg:8;
};

static void nft_tunnel_get_eval(const struct nft_expr *expr,
                                struct nft_regs *regs,
                                const struct nft_pktinfo *pkt)
{
        const struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 *dest = &regs->data[priv->dreg];
        struct ip_tunnel_info *tun_info;

        tun_info = skb_tunnel_info(pkt->skb);

        switch (priv->key) {
        case NFT_TUNNEL_PATH:
                nft_reg_store8(dest, !!tun_info);
                break;
        case NFT_TUNNEL_ID:
                if (!tun_info) {
                        regs->verdict.code = NFT_BREAK;
                        return;
                }
                *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
                break;
        default:
                WARN_ON(1);
                regs->verdict.code = NFT_BREAK;
        }
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
        [NFTA_TUNNEL_KEY]       = { .type = NLA_U32 },
        [NFTA_TUNNEL_DREG]      = { .type = NLA_U32 },
};

static int nft_tunnel_get_init(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nlattr * const tb[])
{
        struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 len;

        if (!tb[NFTA_TUNNEL_KEY] ||
            !tb[NFTA_TUNNEL_DREG])
                return -EINVAL;

        priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
        switch (priv->key) {
        case NFT_TUNNEL_PATH:
                len = sizeof(u8);
                break;
        case NFT_TUNNEL_ID:
                len = sizeof(u32);
                break;
        default:
                return -EOPNOTSUPP;
        }

        priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);

        return nft_validate_register_store(ctx, priv->dreg, NULL,
                                           NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
                               const struct nft_expr *expr)
{
        const struct nft_tunnel *priv = nft_expr_priv(expr);

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
                goto nla_put_failure;
        if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
        .type           = &nft_tunnel_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
        .eval           = nft_tunnel_get_eval,
        .init           = nft_tunnel_get_init,
        .dump           = nft_tunnel_get_dump,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
        .name           = "tunnel",
        .ops            = &nft_tunnel_get_ops,
        .policy         = nft_tunnel_policy,
        .maxattr        = NFTA_TUNNEL_MAX,
        .owner          = THIS_MODULE,
};

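/*
 * "tunnel" object: a preconfigured tunnel encapsulation key (id,
 * endpoint addresses, ports, flags, tos/ttl and optional vxlan/erspan
 * metadata), stored as a metadata dst that rules can attach to packets.
 */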
struct nft_tunnel_opts {
        union {
                struct vxlan_metadata   vxlan;
                struct erspan_metadata  erspan;
        } u;
        u32     len;
        __be16  flags;
};

struct nft_tunnel_obj {
        struct metadata_dst     *md;
        struct nft_tunnel_opts  opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP_SRC]        = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_IP_DST]        = { .type = NLA_U32 },
};

static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
                                  const struct nlattr *attr,
                                  struct ip_tunnel_info *info)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
        int err;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
                               nft_tunnel_ip_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_IP_DST])
                return -EINVAL;

        if (tb[NFTA_TUNNEL_KEY_IP_SRC])
                info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
        if (tb[NFTA_TUNNEL_KEY_IP_DST])
                info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

        return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP6_SRC]       = { .len = sizeof(struct in6_addr), },
        [NFTA_TUNNEL_KEY_IP6_DST]       = { .len = sizeof(struct in6_addr), },
        [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
};

static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
                                   const struct nlattr *attr,
                                   struct ip_tunnel_info *info)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
        int err;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
                               nft_tunnel_ip6_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
                return -EINVAL;

        if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
                memcpy(&info->key.u.ipv6.src,
                       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
                       sizeof(struct in6_addr));
        }
        if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
                memcpy(&info->key.u.ipv6.dst,
                       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
                       sizeof(struct in6_addr));
        }
        if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
                info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

        info->mode |= IP_TUNNEL_INFO_IPV6;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
        [NFTA_TUNNEL_KEY_VXLAN_GBP]     = { .type = NLA_U32 },
};

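/* Parse the nested VXLAN option (Group Based Policy mark). */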
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
                                     struct nft_tunnel_opts *opts)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
        int err;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
                               nft_tunnel_opts_vxlan_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
                return -EINVAL;

        opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

        opts->len       = sizeof(struct vxlan_metadata);
        opts->flags     = TUNNEL_VXLAN_OPT;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
        [NFTA_TUNNEL_KEY_ERSPAN_VERSION]        = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]       = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]         = { .type = NLA_U8 },
        [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]        = { .type = NLA_U8 },
};

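/* Parse the nested ERSPAN option: version 1 index, or version 2 dir/hwid. */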
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
                                      struct nft_tunnel_opts *opts)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
        uint8_t hwid, dir;
        int err, version;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX, attr,
                               nft_tunnel_opts_erspan_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
                return -EINVAL;

        version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
        switch (version) {
        case ERSPAN_VERSION:
                if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
                        return -EINVAL;

                opts->u.erspan.u.index =
                        nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
                break;
        case ERSPAN_VERSION2:
                if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
                    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
                        return -EINVAL;

                hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
                dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

                set_hwid(&opts->u.erspan.u.md2, hwid);
                opts->u.erspan.u.md2.dir = dir;
                break;
        default:
                return -EOPNOTSUPP;
        }
        opts->u.erspan.version = version;

        opts->len       = sizeof(struct erspan_metadata);
        opts->flags     = TUNNEL_ERSPAN_OPT;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
        [NFTA_TUNNEL_KEY_OPTS_VXLAN]    = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_OPTS_ERSPAN]   = { .type = NLA_NESTED, },
};

static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
                                    const struct nlattr *attr,
                                    struct ip_tunnel_info *info,
                                    struct nft_tunnel_opts *opts)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
        int err;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
                               nft_tunnel_opts_policy, NULL);
        if (err < 0)
                return err;

        if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
                err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
                                                opts);
        } else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
                err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
                                                 opts);
        } else {
                return -EOPNOTSUPP;
        }

        return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP]    = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_IP6]   = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_ID]    = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_TOS]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_TTL]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_OPTS]  = { .type = NLA_NESTED, },
};

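/*
 * Build the tunnel object from its netlink attributes: fill an
 * ip_tunnel_info from the id, endpoint addresses, ports, flags, tos/ttl
 * and options, then wrap it in a preallocated metadata dst.
 */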
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
                               const struct nlattr * const tb[],
                               struct nft_object *obj)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct ip_tunnel_info info;
        struct metadata_dst *md;
        int err;

        if (!tb[NFTA_TUNNEL_KEY_ID])
                return -EINVAL;

        memset(&info, 0, sizeof(info));
        info.mode = IP_TUNNEL_INFO_TX;
        info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
        info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

        if (tb[NFTA_TUNNEL_KEY_IP]) {
                err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
                if (err < 0)
                        return err;
        } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
                err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
                if (err < 0)
                        return err;
        } else {
                return -EINVAL;
        }

        if (tb[NFTA_TUNNEL_KEY_SPORT]) {
                info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
        }
        if (tb[NFTA_TUNNEL_KEY_DPORT]) {
                info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
        }

        if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
                u32 tun_flags;

                tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
                if (tun_flags & ~NFT_TUNNEL_F_MASK)
                        return -EOPNOTSUPP;

                if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
                        info.key.tun_flags &= ~TUNNEL_CSUM;
                if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
                        info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
                if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
                        info.key.tun_flags |= TUNNEL_SEQ;
        }
        if (tb[NFTA_TUNNEL_KEY_TOS])
                info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
        if (tb[NFTA_TUNNEL_KEY_TTL])
                info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
        else
                info.key.ttl = U8_MAX;

        if (tb[NFTA_TUNNEL_KEY_OPTS]) {
                err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
                                               &info, &priv->opts);
                if (err < 0)
                        return err;
        }

        md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
        if (!md)
                return -ENOMEM;

        memcpy(&md->u.tun_info, &info, sizeof(info));
        ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
                                priv->opts.flags);
        priv->md = md;

        return 0;
}

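/*
 * Attach the preallocated tunnel metadata dst to the packet, so that a
 * metadata-based tunnel device can later encapsulate it with these
 * parameters.
 */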
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
                                       struct nft_regs *regs,
                                       const struct nft_pktinfo *pkt)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct sk_buff *skb = pkt->skb;

        skb_dst_drop(skb);
        dst_hold((struct dst_entry *) priv->md);
        skb_dst_set(skb, (struct dst_entry *) priv->md);
}

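/* Netlink dump helpers for the tunnel object configuration. */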
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
        struct nlattr *nest;

        if (info->mode & IP_TUNNEL_INFO_IPV6) {
                nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP6);
                if (!nest)
                        return -1;

                if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
                    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
                    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
                        return -1;

                nla_nest_end(skb, nest);
        } else {
                nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP);
                if (!nest)
                        return -1;

                if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
                    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
                        return -1;

                nla_nest_end(skb, nest);
        }

        return 0;
}

static int nft_tunnel_opts_dump(struct sk_buff *skb,
                                struct nft_tunnel_obj *priv)
{
        struct nft_tunnel_opts *opts = &priv->opts;
        struct nlattr *nest;

        nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_OPTS);
        if (!nest)
                return -1;

        if (opts->flags & TUNNEL_VXLAN_OPT) {
                if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
                                 htonl(opts->u.vxlan.gbp)))
                        return -1;
        } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
                switch (opts->u.erspan.version) {
                case ERSPAN_VERSION:
                        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
                                         opts->u.erspan.u.index))
                                return -1;
                        break;
                case ERSPAN_VERSION2:
                        if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
                                       get_hwid(&opts->u.erspan.u.md2)) ||
                            nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
                                       opts->u.erspan.u.md2.dir))
                                return -1;
                        break;
                }
        }

        nla_nest_end(skb, nest);

        return 0;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
                                 struct ip_tunnel_info *info)
{
        if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
            nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
                return -1;

        return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
                                 struct ip_tunnel_info *info)
{
        u32 flags = 0;

        if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
                flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
        if (!(info->key.tun_flags & TUNNEL_CSUM))
                flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
        if (info->key.tun_flags & TUNNEL_SEQ)
                flags |= NFT_TUNNEL_F_SEQ_NUMBER;

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
                return -1;

        return 0;
}

static int nft_tunnel_obj_dump(struct sk_buff *skb,
                               struct nft_object *obj, bool reset)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct ip_tunnel_info *info = &priv->md->u.tun_info;

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
                         tunnel_id_to_key32(info->key.tun_id)) ||
            nft_tunnel_ip_dump(skb, info) < 0 ||
            nft_tunnel_ports_dump(skb, info) < 0 ||
            nft_tunnel_flags_dump(skb, info) < 0 ||
            nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
            nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
            nft_tunnel_opts_dump(skb, priv) < 0)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
                                   struct nft_object *obj)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);

        metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
        .type           = &nft_tunnel_obj_type,
        .size           = sizeof(struct nft_tunnel_obj),
        .eval           = nft_tunnel_obj_eval,
        .init           = nft_tunnel_obj_init,
        .destroy        = nft_tunnel_obj_destroy,
        .dump           = nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
        .type           = NFT_OBJECT_TUNNEL,
        .ops            = &nft_tunnel_obj_ops,
        .maxattr        = NFTA_TUNNEL_KEY_MAX,
        .policy         = nft_tunnel_key_policy,
        .owner          = THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
        int err;

        err = nft_register_expr(&nft_tunnel_type);
        if (err < 0)
                return err;

        err = nft_register_obj(&nft_tunnel_obj_type);
        if (err < 0)
                nft_unregister_expr(&nft_tunnel_type);

        return err;
}

static void __exit nft_tunnel_module_exit(void)
{
        nft_unregister_obj(&nft_tunnel_obj_type);
        nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);