// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2020 Alexander Aring <alex.aring@gmail.com>
 */

#include <linux/rpl_iptunnel.h>

#include <net/dst_cache.h>
#include <net/ip6_route.h>
#include <net/lwtunnel.h>
#include <net/ipv6.h>
#include <net/rpl.h>
struct rpl_iptunnel_encap {
	struct ipv6_rpl_sr_hdr srh[0];
};

struct rpl_lwt {
	struct dst_cache cache;
	struct rpl_iptunnel_encap tuninfo;
};
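
/*
 * The rpl_lwt state lives in lwtunnel_state->data; the variable-length
 * SRH (tuninfo.srh) is stored inline at the end of the allocation, so
 * rpl_build_state() sizes it as srh_len + sizeof(struct rpl_lwt).
 */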
static inline struct rpl_lwt *rpl_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct rpl_lwt *)lwt->data;
}
static inline struct rpl_iptunnel_encap *
rpl_encap_lwtunnel(struct lwtunnel_state *lwt)
{
	return &rpl_lwt_lwtunnel(lwt)->tuninfo;
}
static const struct nla_policy rpl_iptunnel_policy[RPL_IPTUNNEL_MAX + 1] = {
	[RPL_IPTUNNEL_SRH]	= { .type = NLA_BINARY },
};
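
/*
 * Sanity-check an SRH supplied from userspace before accepting it as
 * tunnel state: the header length must match the segment list, there
 * must be at least one segment, the header must be uncompressed
 * (cmpri/cmpre == 0), the segment list must not contain a loop, and
 * the final destination must not be a multicast address.
 */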
static bool rpl_validate_srh(struct net *net, struct ipv6_rpl_sr_hdr *srh,
			     size_t seglen)
{
	int err;

	if ((srh->hdrlen << 3) != seglen)
		return false;

	/* check at least one segment and seglen fit with segments_left */
	if (!srh->segments_left ||
	    (srh->segments_left * sizeof(struct in6_addr)) != seglen)
		return false;

	if (srh->cmpri || srh->cmpre)
		return false;

	err = ipv6_chk_rpl_srh_loop(net, srh->rpl_segaddr,
				    srh->segments_left);
	if (err)
		return false;

	if (ipv6_addr_type(&srh->rpl_segaddr[srh->segments_left - 1]) &
	    IPV6_ADDR_MULTICAST)
		return false;

	return true;
}
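
/*
 * Build the lwtunnel state for a route configured with RPL encap.  The
 * SRH arrives as the RPL_IPTUNNEL_SRH netlink attribute.  With iproute2
 * the setup looks roughly like this (illustrative only; addresses and
 * the exact "encap rpl" syntax depend on the iproute2 version):
 *
 *	ip -6 route add fc00::5/128 encap rpl segs fc00::2,fc00::3 dev eth0
 */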
static int rpl_build_state(struct net *net, struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RPL_IPTUNNEL_MAX + 1];
	struct lwtunnel_state *newts;
	struct ipv6_rpl_sr_hdr *srh;
	struct rpl_lwt *rlwt;
	int err, srh_len;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested(tb, RPL_IPTUNNEL_MAX, nla,
			       rpl_iptunnel_policy, extack);
	if (err < 0)
		return err;

	if (!tb[RPL_IPTUNNEL_SRH])
		return -EINVAL;

	srh = nla_data(tb[RPL_IPTUNNEL_SRH]);
	srh_len = nla_len(tb[RPL_IPTUNNEL_SRH]);

	if (srh_len < sizeof(*srh))
		return -EINVAL;

	/* verify that SRH is consistent */
	if (!rpl_validate_srh(net, srh, srh_len - sizeof(*srh)))
		return -EINVAL;

	newts = lwtunnel_state_alloc(srh_len + sizeof(*rlwt));
	if (!newts)
		return -ENOMEM;

	rlwt = rpl_lwt_lwtunnel(newts);

	err = dst_cache_init(&rlwt->cache, GFP_ATOMIC);
	if (err) {
		kfree(newts);
		return err;
	}

	memcpy(&rlwt->tuninfo.srh, srh, srh_len);

	newts->type = LWTUNNEL_ENCAP_RPL;
	newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	*ts = newts;

	return 0;
}
static void rpl_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&rpl_lwt_lwtunnel(lwt)->cache);
}
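
/*
 * Insert the SRH into an outgoing packet "inline": the original
 * destination address becomes the last segment, the remaining segments
 * shift down by one, the result is compressed with
 * ipv6_rpl_srh_compress(), and the compressed routing header is pushed
 * between a copy of the original IPv6 header and the payload.  The
 * packet then leaves towards the first segment.
 */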
static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
			     const struct ipv6_rpl_sr_hdr *srh)
{
	struct ipv6_rpl_sr_hdr *isrh, *csrh;
	const struct ipv6hdr *oldhdr;
	struct ipv6hdr *hdr;
	unsigned char *buf;
	size_t hdrlen;
	int err;

	oldhdr = ipv6_hdr(skb);

	/* scratch space for the uncompressed and the compressed SRH */
	buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2,
		      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	isrh = (struct ipv6_rpl_sr_hdr *)buf;
	csrh = (struct ipv6_rpl_sr_hdr *)(buf + ((srh->hdrlen + 1) << 3));

	memcpy(isrh, srh, sizeof(*isrh));
	memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
	       (srh->segments_left - 1) * 16);
	isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;

	ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
			      isrh->segments_left - 1);

	hdrlen = ((csrh->hdrlen + 1) << 3);

	err = skb_cow_head(skb, hdrlen + skb->mac_len);
	if (unlikely(err)) {
		kfree(buf);
		return err;
	}

	skb_pull(skb, sizeof(struct ipv6hdr));
	skb_postpull_rcsum(skb, skb_network_header(skb),
			   sizeof(struct ipv6hdr));

	skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);
	memmove(hdr, oldhdr, sizeof(*hdr));
	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, csrh, hdrlen);

	isrh->nexthdr = hdr->nexthdr;
	hdr->nexthdr = NEXTHDR_ROUTING;
	hdr->daddr = srh->rpl_segaddr[0];

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);

	kfree(buf);

	return 0;
}
static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rpl_iptunnel_encap *tinfo;
	int err;

	if (skb->protocol != htons(ETH_P_IPV6))
		return -EINVAL;

	tinfo = rpl_encap_lwtunnel(dst->lwtstate);

	err = rpl_do_srh_inline(skb, rlwt, tinfo->srh);
	if (err)
		return err;

	return 0;
}
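
/*
 * Output redirect hook: encapsulate the packet, then resolve a route
 * for the rewritten destination.  The result is kept in the per-tunnel
 * dst_cache so the routing lookup is skipped for subsequent packets.
 */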
static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct rpl_lwt *rlwt;
	int err;

	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);

	err = rpl_do_srh(skb, rlwt);
	if (unlikely(err))
		goto drop;

	preempt_disable();
	dst = dst_cache_get(&rlwt->cache);
	preempt_enable();

	if (unlikely(!dst)) {
		struct ipv6hdr *hdr = ipv6_hdr(skb);
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.daddr = hdr->daddr;
		fl6.saddr = hdr->saddr;
		fl6.flowlabel = ip6_flowinfo(hdr);
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = hdr->nexthdr;

		dst = ip6_route_output(net, NULL, &fl6);
		if (dst->error) {
			err = dst->error;
			dst_release(dst);
			goto drop;
		}

		preempt_disable();
		dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
		preempt_enable();
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto drop;

	return dst_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}
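
/*
 * Input redirect hook: encapsulate the forwarded packet and re-run the
 * IPv6 input route lookup for the new destination, again backed by the
 * per-tunnel dst_cache.
 */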
static int rpl_input(struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct rpl_lwt *rlwt;
	int err;

	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);

	err = rpl_do_srh(skb, rlwt);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	preempt_disable();
	dst = dst_cache_get(&rlwt->cache);
	preempt_enable();

	skb_dst_drop(skb);

	if (!dst) {
		ip6_route_input(skb);
		dst = skb_dst(skb);
		if (!dst->error) {
			preempt_disable();
			dst_cache_set_ip6(&rlwt->cache, dst,
					  &ipv6_hdr(skb)->saddr);
			preempt_enable();
		}
	} else {
		skb_dst_set(skb, dst);
	}

	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		return err;

	return dst_input(skb);
}
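
/*
 * Netlink dump path: copy the stored SRH back to userspace so that
 * "ip route show" and friends can display the configured encap.
 */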
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
			   struct rpl_iptunnel_encap *tuninfo)
{
	struct rpl_iptunnel_encap *data;
	struct nlattr *nla;
	int len;

	len = RPL_IPTUNNEL_SRH_SIZE(tuninfo->srh);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, tuninfo->srh, len);

	return 0;
}
static int rpl_fill_encap_info(struct sk_buff *skb,
			       struct lwtunnel_state *lwtstate)
{
	struct rpl_iptunnel_encap *tuninfo = rpl_encap_lwtunnel(lwtstate);

	if (nla_put_rpl_srh(skb, RPL_IPTUNNEL_SRH, tuninfo))
		return -EMSGSIZE;

	return 0;
}
static int rpl_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct rpl_iptunnel_encap *tuninfo = rpl_encap_lwtunnel(lwtstate);

	return nla_total_size(RPL_IPTUNNEL_SRH_SIZE(tuninfo->srh));
}
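
/*
 * Two encap states compare equal only if their SRHs have the same size
 * and are byte-for-byte identical; the lwtunnel core uses this to
 * decide whether an existing route's encap matches a new one.
 */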
static int rpl_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct rpl_iptunnel_encap *a_hdr = rpl_encap_lwtunnel(a);
	struct rpl_iptunnel_encap *b_hdr = rpl_encap_lwtunnel(b);
	int len = RPL_IPTUNNEL_SRH_SIZE(a_hdr->srh);

	if (len != RPL_IPTUNNEL_SRH_SIZE(b_hdr->srh))
		return 1;

	return memcmp(a_hdr, b_hdr, len);
}
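
/*
 * lwtunnel ops for LWTUNNEL_ENCAP_RPL.  Both .input and .output are
 * wired up because rpl_build_state() sets the INPUT_REDIRECT and
 * OUTPUT_REDIRECT flags on the state.
 */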
static const struct lwtunnel_encap_ops rpl_ops = {
	.build_state	= rpl_build_state,
	.destroy_state	= rpl_destroy_state,
	.output		= rpl_output,
	.input		= rpl_input,
	.fill_encap	= rpl_fill_encap_info,
	.get_encap_size	= rpl_encap_nlsize,
	.cmp_encap	= rpl_encap_cmp,
	.owner		= THIS_MODULE,
};
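
/*
 * Registration with the lwtunnel core; called from the IPv6 subsystem
 * init/exit paths.
 */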
int __init rpl_init(void)
{
	int err;

	err = lwtunnel_encap_add_ops(&rpl_ops, LWTUNNEL_ENCAP_RPL);
	if (err)
		goto out;

	pr_info("RPL Segment Routing with IPv6\n");

	return 0;

out:
	return err;
}
void rpl_exit(void)
{
	lwtunnel_encap_del_ops(&rpl_ops, LWTUNNEL_ENCAP_RPL);
}