// SPDX-License-Identifier: GPL-2.0+
/*
 *  IPv6 IOAM Lightweight Tunnel implementation
 *
 *  Author:
 *  Justin Iurman <justin.iurman@uliege.be>
 */
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include <linux/net.h>
12 #include <linux/in6.h>
13 #include <linux/ioam6.h>
14 #include <linux/ioam6_iptunnel.h>
17 #include <net/lwtunnel.h>
18 #include <net/ioam6.h>
19 #include <net/netlink.h>
21 #include <net/dst_cache.h>
22 #include <net/ip6_route.h>
23 #include <net/addrconf.h>
25 #define IOAM6_MASK_SHORT_FIELDS 0xff100000
26 #define IOAM6_MASK_WIDE_FIELDS 0xe00000
28 struct ioam6_lwt_encap
{
29 struct ipv6_hopopt_hdr eh
;
30 u8 pad
[2]; /* 2-octet padding for 4n-alignment */
31 struct ioam6_hdr ioamh
;
32 struct ioam6_trace_hdr traceh
;
35 struct ioam6_lwt_freq
{
41 struct dst_cache cache
;
42 struct ioam6_lwt_freq freq
;
46 struct in6_addr tunsrc
;
47 struct in6_addr tundst
;
48 struct ioam6_lwt_encap tuninfo
;
51 static const struct netlink_range_validation freq_range
= {
52 .min
= IOAM6_IPTUNNEL_FREQ_MIN
,
53 .max
= IOAM6_IPTUNNEL_FREQ_MAX
,
56 static struct ioam6_lwt
*ioam6_lwt_state(struct lwtunnel_state
*lwt
)
58 return (struct ioam6_lwt
*)lwt
->data
;
61 static struct ioam6_lwt_encap
*ioam6_lwt_info(struct lwtunnel_state
*lwt
)
63 return &ioam6_lwt_state(lwt
)->tuninfo
;
66 static struct ioam6_trace_hdr
*ioam6_lwt_trace(struct lwtunnel_state
*lwt
)
68 return &(ioam6_lwt_state(lwt
)->tuninfo
.traceh
);
71 static const struct nla_policy ioam6_iptunnel_policy
[IOAM6_IPTUNNEL_MAX
+ 1] = {
72 [IOAM6_IPTUNNEL_FREQ_K
] = NLA_POLICY_FULL_RANGE(NLA_U32
, &freq_range
),
73 [IOAM6_IPTUNNEL_FREQ_N
] = NLA_POLICY_FULL_RANGE(NLA_U32
, &freq_range
),
74 [IOAM6_IPTUNNEL_MODE
] = NLA_POLICY_RANGE(NLA_U8
,
75 IOAM6_IPTUNNEL_MODE_MIN
,
76 IOAM6_IPTUNNEL_MODE_MAX
),
77 [IOAM6_IPTUNNEL_SRC
] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr
)),
78 [IOAM6_IPTUNNEL_DST
] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr
)),
79 [IOAM6_IPTUNNEL_TRACE
] = NLA_POLICY_EXACT_LEN(
80 sizeof(struct ioam6_trace_hdr
)),
83 static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr
*trace
)
87 if (!trace
->type_be32
|| !trace
->remlen
||
88 trace
->remlen
> IOAM6_TRACE_DATA_SIZE_MAX
/ 4 ||
89 trace
->type
.bit12
| trace
->type
.bit13
| trace
->type
.bit14
|
90 trace
->type
.bit15
| trace
->type
.bit16
| trace
->type
.bit17
|
91 trace
->type
.bit18
| trace
->type
.bit19
| trace
->type
.bit20
|
92 trace
->type
.bit21
| trace
->type
.bit23
)
96 fields
= be32_to_cpu(trace
->type_be32
);
98 trace
->nodelen
+= hweight32(fields
& IOAM6_MASK_SHORT_FIELDS
)
99 * (sizeof(__be32
) / 4);
100 trace
->nodelen
+= hweight32(fields
& IOAM6_MASK_WIDE_FIELDS
)
101 * (sizeof(__be64
) / 4);
106 static int ioam6_build_state(struct net
*net
, struct nlattr
*nla
,
107 unsigned int family
, const void *cfg
,
108 struct lwtunnel_state
**ts
,
109 struct netlink_ext_ack
*extack
)
111 struct nlattr
*tb
[IOAM6_IPTUNNEL_MAX
+ 1];
112 struct ioam6_lwt_encap
*tuninfo
;
113 struct ioam6_trace_hdr
*trace
;
114 struct lwtunnel_state
*lwt
;
115 struct ioam6_lwt
*ilwt
;
116 int len_aligned
, err
;
120 if (family
!= AF_INET6
)
123 err
= nla_parse_nested(tb
, IOAM6_IPTUNNEL_MAX
, nla
,
124 ioam6_iptunnel_policy
, extack
);
128 if ((!tb
[IOAM6_IPTUNNEL_FREQ_K
] && tb
[IOAM6_IPTUNNEL_FREQ_N
]) ||
129 (tb
[IOAM6_IPTUNNEL_FREQ_K
] && !tb
[IOAM6_IPTUNNEL_FREQ_N
])) {
130 NL_SET_ERR_MSG(extack
, "freq: missing parameter");
132 } else if (!tb
[IOAM6_IPTUNNEL_FREQ_K
] && !tb
[IOAM6_IPTUNNEL_FREQ_N
]) {
133 freq_k
= IOAM6_IPTUNNEL_FREQ_MIN
;
134 freq_n
= IOAM6_IPTUNNEL_FREQ_MIN
;
136 freq_k
= nla_get_u32(tb
[IOAM6_IPTUNNEL_FREQ_K
]);
137 freq_n
= nla_get_u32(tb
[IOAM6_IPTUNNEL_FREQ_N
]);
139 if (freq_k
> freq_n
) {
140 NL_SET_ERR_MSG(extack
, "freq: k > n is forbidden");
145 mode
= nla_get_u8_default(tb
[IOAM6_IPTUNNEL_MODE
],
146 IOAM6_IPTUNNEL_MODE_INLINE
);
148 if (tb
[IOAM6_IPTUNNEL_SRC
] && mode
== IOAM6_IPTUNNEL_MODE_INLINE
) {
149 NL_SET_ERR_MSG(extack
, "no tunnel src expected with this mode");
153 if (!tb
[IOAM6_IPTUNNEL_DST
] && mode
!= IOAM6_IPTUNNEL_MODE_INLINE
) {
154 NL_SET_ERR_MSG(extack
, "this mode needs a tunnel destination");
158 if (!tb
[IOAM6_IPTUNNEL_TRACE
]) {
159 NL_SET_ERR_MSG(extack
, "missing trace");
163 trace
= nla_data(tb
[IOAM6_IPTUNNEL_TRACE
]);
164 if (!ioam6_validate_trace_hdr(trace
)) {
165 NL_SET_ERR_MSG_ATTR(extack
, tb
[IOAM6_IPTUNNEL_TRACE
],
166 "invalid trace validation");
170 len_aligned
= ALIGN(trace
->remlen
* 4, 8);
171 lwt
= lwtunnel_state_alloc(sizeof(*ilwt
) + len_aligned
);
175 ilwt
= ioam6_lwt_state(lwt
);
176 err
= dst_cache_init(&ilwt
->cache
, GFP_ATOMIC
);
180 atomic_set(&ilwt
->pkt_cnt
, 0);
181 ilwt
->freq
.k
= freq_k
;
182 ilwt
->freq
.n
= freq_n
;
186 if (!tb
[IOAM6_IPTUNNEL_SRC
]) {
187 ilwt
->has_tunsrc
= false;
189 ilwt
->has_tunsrc
= true;
190 ilwt
->tunsrc
= nla_get_in6_addr(tb
[IOAM6_IPTUNNEL_SRC
]);
192 if (ipv6_addr_any(&ilwt
->tunsrc
)) {
193 NL_SET_ERR_MSG_ATTR(extack
, tb
[IOAM6_IPTUNNEL_SRC
],
194 "invalid tunnel source address");
200 if (tb
[IOAM6_IPTUNNEL_DST
]) {
201 ilwt
->tundst
= nla_get_in6_addr(tb
[IOAM6_IPTUNNEL_DST
]);
203 if (ipv6_addr_any(&ilwt
->tundst
)) {
204 NL_SET_ERR_MSG_ATTR(extack
, tb
[IOAM6_IPTUNNEL_DST
],
205 "invalid tunnel dest address");
211 tuninfo
= ioam6_lwt_info(lwt
);
212 tuninfo
->eh
.hdrlen
= ((sizeof(*tuninfo
) + len_aligned
) >> 3) - 1;
213 tuninfo
->pad
[0] = IPV6_TLV_PADN
;
214 tuninfo
->ioamh
.type
= IOAM6_TYPE_PREALLOC
;
215 tuninfo
->ioamh
.opt_type
= IPV6_TLV_IOAM
;
216 tuninfo
->ioamh
.opt_len
= sizeof(tuninfo
->ioamh
) - 2 + sizeof(*trace
)
219 memcpy(&tuninfo
->traceh
, trace
, sizeof(*trace
));
221 if (len_aligned
- trace
->remlen
* 4) {
222 tuninfo
->traceh
.data
[trace
->remlen
* 4] = IPV6_TLV_PADN
;
223 tuninfo
->traceh
.data
[trace
->remlen
* 4 + 1] = 2;
226 lwt
->type
= LWTUNNEL_ENCAP_IOAM6
;
227 lwt
->flags
|= LWTUNNEL_STATE_OUTPUT_REDIRECT
;
233 dst_cache_destroy(&ilwt
->cache
);
239 static int ioam6_do_fill(struct net
*net
, struct sk_buff
*skb
)
241 struct ioam6_trace_hdr
*trace
;
242 struct ioam6_namespace
*ns
;
244 trace
= (struct ioam6_trace_hdr
*)(skb_transport_header(skb
)
245 + sizeof(struct ipv6_hopopt_hdr
) + 2
246 + sizeof(struct ioam6_hdr
));
248 ns
= ioam6_namespace(net
, trace
->namespace_id
);
250 ioam6_fill_trace_data(skb
, ns
, trace
, false);
255 static int ioam6_do_inline(struct net
*net
, struct sk_buff
*skb
,
256 struct ioam6_lwt_encap
*tuninfo
,
257 struct dst_entry
*cache_dst
)
259 struct ipv6hdr
*oldhdr
, *hdr
;
262 hdrlen
= (tuninfo
->eh
.hdrlen
+ 1) << 3;
264 err
= skb_cow_head(skb
, hdrlen
+ dst_dev_overhead(cache_dst
, skb
));
268 oldhdr
= ipv6_hdr(skb
);
269 skb_pull(skb
, sizeof(*oldhdr
));
270 skb_postpull_rcsum(skb
, skb_network_header(skb
), sizeof(*oldhdr
));
272 skb_push(skb
, sizeof(*oldhdr
) + hdrlen
);
273 skb_reset_network_header(skb
);
274 skb_mac_header_rebuild(skb
);
277 memmove(hdr
, oldhdr
, sizeof(*oldhdr
));
278 tuninfo
->eh
.nexthdr
= hdr
->nexthdr
;
280 skb_set_transport_header(skb
, sizeof(*hdr
));
281 skb_postpush_rcsum(skb
, hdr
, sizeof(*hdr
) + hdrlen
);
283 memcpy(skb_transport_header(skb
), (u8
*)tuninfo
, hdrlen
);
285 hdr
->nexthdr
= NEXTHDR_HOP
;
286 hdr
->payload_len
= cpu_to_be16(skb
->len
- sizeof(*hdr
));
288 return ioam6_do_fill(net
, skb
);
291 static int ioam6_do_encap(struct net
*net
, struct sk_buff
*skb
,
292 struct ioam6_lwt_encap
*tuninfo
,
294 struct in6_addr
*tunsrc
,
295 struct in6_addr
*tundst
,
296 struct dst_entry
*cache_dst
)
298 struct dst_entry
*dst
= skb_dst(skb
);
299 struct ipv6hdr
*hdr
, *inner_hdr
;
300 int hdrlen
, len
, err
;
302 hdrlen
= (tuninfo
->eh
.hdrlen
+ 1) << 3;
303 len
= sizeof(*hdr
) + hdrlen
;
305 err
= skb_cow_head(skb
, len
+ dst_dev_overhead(cache_dst
, skb
));
309 inner_hdr
= ipv6_hdr(skb
);
312 skb_reset_network_header(skb
);
313 skb_mac_header_rebuild(skb
);
314 skb_set_transport_header(skb
, sizeof(*hdr
));
316 tuninfo
->eh
.nexthdr
= NEXTHDR_IPV6
;
317 memcpy(skb_transport_header(skb
), (u8
*)tuninfo
, hdrlen
);
320 memcpy(hdr
, inner_hdr
, sizeof(*hdr
));
322 hdr
->nexthdr
= NEXTHDR_HOP
;
323 hdr
->payload_len
= cpu_to_be16(skb
->len
- sizeof(*hdr
));
324 hdr
->daddr
= *tundst
;
327 memcpy(&hdr
->saddr
, tunsrc
, sizeof(*tunsrc
));
329 ipv6_dev_get_saddr(net
, dst
->dev
, &hdr
->daddr
,
330 IPV6_PREFER_SRC_PUBLIC
, &hdr
->saddr
);
332 skb_postpush_rcsum(skb
, hdr
, len
);
334 return ioam6_do_fill(net
, skb
);
337 static int ioam6_output(struct net
*net
, struct sock
*sk
, struct sk_buff
*skb
)
339 struct dst_entry
*dst
= skb_dst(skb
), *cache_dst
;
340 struct in6_addr orig_daddr
;
341 struct ioam6_lwt
*ilwt
;
345 if (skb
->protocol
!= htons(ETH_P_IPV6
))
348 ilwt
= ioam6_lwt_state(dst
->lwtstate
);
350 /* Check for insertion frequency (i.e., "k over n" insertions) */
351 pkt_cnt
= atomic_fetch_inc(&ilwt
->pkt_cnt
);
352 if (pkt_cnt
% ilwt
->freq
.n
>= ilwt
->freq
.k
)
355 orig_daddr
= ipv6_hdr(skb
)->daddr
;
358 cache_dst
= dst_cache_get(&ilwt
->cache
);
361 switch (ilwt
->mode
) {
362 case IOAM6_IPTUNNEL_MODE_INLINE
:
364 /* Direct insertion - if there is no Hop-by-Hop yet */
365 if (ipv6_hdr(skb
)->nexthdr
== NEXTHDR_HOP
)
368 err
= ioam6_do_inline(net
, skb
, &ilwt
->tuninfo
, cache_dst
);
373 case IOAM6_IPTUNNEL_MODE_ENCAP
:
375 /* Encapsulation (ip6ip6) */
376 err
= ioam6_do_encap(net
, skb
, &ilwt
->tuninfo
,
377 ilwt
->has_tunsrc
, &ilwt
->tunsrc
,
378 &ilwt
->tundst
, cache_dst
);
383 case IOAM6_IPTUNNEL_MODE_AUTO
:
384 /* Automatic (RFC8200 compliant):
385 * - local packets -> INLINE mode
386 * - in-transit packets -> ENCAP mode
396 if (unlikely(!cache_dst
)) {
397 struct ipv6hdr
*hdr
= ipv6_hdr(skb
);
400 memset(&fl6
, 0, sizeof(fl6
));
401 fl6
.daddr
= hdr
->daddr
;
402 fl6
.saddr
= hdr
->saddr
;
403 fl6
.flowlabel
= ip6_flowinfo(hdr
);
404 fl6
.flowi6_mark
= skb
->mark
;
405 fl6
.flowi6_proto
= hdr
->nexthdr
;
407 cache_dst
= ip6_route_output(net
, NULL
, &fl6
);
408 if (cache_dst
->error
) {
409 err
= cache_dst
->error
;
410 dst_release(cache_dst
);
415 dst_cache_set_ip6(&ilwt
->cache
, cache_dst
, &fl6
.saddr
);
418 err
= skb_cow_head(skb
, LL_RESERVED_SPACE(cache_dst
->dev
));
423 if (!ipv6_addr_equal(&orig_daddr
, &ipv6_hdr(skb
)->daddr
)) {
425 skb_dst_set(skb
, cache_dst
);
426 return dst_output(net
, sk
, skb
);
429 return dst
->lwtstate
->orig_output(net
, sk
, skb
);
435 static void ioam6_destroy_state(struct lwtunnel_state
*lwt
)
437 dst_cache_destroy(&ioam6_lwt_state(lwt
)->cache
);
440 static int ioam6_fill_encap_info(struct sk_buff
*skb
,
441 struct lwtunnel_state
*lwtstate
)
443 struct ioam6_lwt
*ilwt
= ioam6_lwt_state(lwtstate
);
446 err
= nla_put_u32(skb
, IOAM6_IPTUNNEL_FREQ_K
, ilwt
->freq
.k
);
450 err
= nla_put_u32(skb
, IOAM6_IPTUNNEL_FREQ_N
, ilwt
->freq
.n
);
454 err
= nla_put_u8(skb
, IOAM6_IPTUNNEL_MODE
, ilwt
->mode
);
458 if (ilwt
->mode
!= IOAM6_IPTUNNEL_MODE_INLINE
) {
459 if (ilwt
->has_tunsrc
) {
460 err
= nla_put_in6_addr(skb
, IOAM6_IPTUNNEL_SRC
,
466 err
= nla_put_in6_addr(skb
, IOAM6_IPTUNNEL_DST
, &ilwt
->tundst
);
471 err
= nla_put(skb
, IOAM6_IPTUNNEL_TRACE
, sizeof(ilwt
->tuninfo
.traceh
),
472 &ilwt
->tuninfo
.traceh
);
477 static int ioam6_encap_nlsize(struct lwtunnel_state
*lwtstate
)
479 struct ioam6_lwt
*ilwt
= ioam6_lwt_state(lwtstate
);
482 nlsize
= nla_total_size(sizeof(ilwt
->freq
.k
)) +
483 nla_total_size(sizeof(ilwt
->freq
.n
)) +
484 nla_total_size(sizeof(ilwt
->mode
)) +
485 nla_total_size(sizeof(ilwt
->tuninfo
.traceh
));
487 if (ilwt
->mode
!= IOAM6_IPTUNNEL_MODE_INLINE
) {
488 if (ilwt
->has_tunsrc
)
489 nlsize
+= nla_total_size(sizeof(ilwt
->tunsrc
));
491 nlsize
+= nla_total_size(sizeof(ilwt
->tundst
));
497 static int ioam6_encap_cmp(struct lwtunnel_state
*a
, struct lwtunnel_state
*b
)
499 struct ioam6_trace_hdr
*trace_a
= ioam6_lwt_trace(a
);
500 struct ioam6_trace_hdr
*trace_b
= ioam6_lwt_trace(b
);
501 struct ioam6_lwt
*ilwt_a
= ioam6_lwt_state(a
);
502 struct ioam6_lwt
*ilwt_b
= ioam6_lwt_state(b
);
504 return (ilwt_a
->freq
.k
!= ilwt_b
->freq
.k
||
505 ilwt_a
->freq
.n
!= ilwt_b
->freq
.n
||
506 ilwt_a
->mode
!= ilwt_b
->mode
||
507 ilwt_a
->has_tunsrc
!= ilwt_b
->has_tunsrc
||
508 (ilwt_a
->mode
!= IOAM6_IPTUNNEL_MODE_INLINE
&&
509 !ipv6_addr_equal(&ilwt_a
->tundst
, &ilwt_b
->tundst
)) ||
510 (ilwt_a
->mode
!= IOAM6_IPTUNNEL_MODE_INLINE
&&
511 ilwt_a
->has_tunsrc
&&
512 !ipv6_addr_equal(&ilwt_a
->tunsrc
, &ilwt_b
->tunsrc
)) ||
513 trace_a
->namespace_id
!= trace_b
->namespace_id
);
516 static const struct lwtunnel_encap_ops ioam6_iptun_ops
= {
517 .build_state
= ioam6_build_state
,
518 .destroy_state
= ioam6_destroy_state
,
519 .output
= ioam6_output
,
520 .fill_encap
= ioam6_fill_encap_info
,
521 .get_encap_size
= ioam6_encap_nlsize
,
522 .cmp_encap
= ioam6_encap_cmp
,
523 .owner
= THIS_MODULE
,
526 int __init
ioam6_iptunnel_init(void)
528 return lwtunnel_encap_add_ops(&ioam6_iptun_ops
, LWTUNNEL_ENCAP_IOAM6
);
531 void ioam6_iptunnel_exit(void)
533 lwtunnel_encap_del_ops(&ioam6_iptun_ops
, LWTUNNEL_ENCAP_IOAM6
);