#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <net/checksum.h>
#include <net/dst_cache.h>
#include <net/ip.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/lwtunnel.h>
#include <net/protocol.h>
#include <uapi/linux/ila.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"
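
/* Per-route lightweight tunnel state for ILA: the translation parameters,
 * a cached route to the translated destination, and a flag noting whether
 * the route is host-specific so the cached dst remains valid.
 */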
struct ila_lwt {
	struct ila_params p;
	struct dst_cache dst_cache;
	u32 connected : 1;
};

static inline struct ila_lwt *ila_lwt_lwtunnel(
	struct lwtunnel_state *lwt)
{
	return (struct ila_lwt *)lwt->data;
}

static inline struct ila_params *ila_params_lwtunnel(
	struct lwtunnel_state *lwt)
{
	return &ila_lwt_lwtunnel(lwt)->p;
}
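
/* Output path: apply the configured locator translation to the destination
 * address, then route the packet toward the translated address, using the
 * per-route dst cache when the base route does not already supply a next hop.
 */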
static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct rt6_info *rt = (struct rt6_info *)orig_dst;
	struct ila_lwt *ilwt = ila_lwt_lwtunnel(orig_dst->lwtstate);
	struct dst_entry *dst;
	int err = -EINVAL;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	ila_update_ipv6_locator(skb, ila_params_lwtunnel(orig_dst->lwtstate),
				true);

	if (rt->rt6i_flags & (RTF_GATEWAY | RTF_CACHE)) {
		/* Already have a next hop address in route, no need for
		 * dst cache route.
		 */
		return orig_dst->lwtstate->orig_output(net, sk, skb);
	}

	dst = dst_cache_get(&ilwt->dst_cache);
	if (unlikely(!dst)) {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		struct flowi6 fl6;

		/* Lookup a route for the new destination. Take into
		 * account that the base route may already have a gateway.
		 */

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = orig_dst->dev->ifindex;
		fl6.flowi6_iif = LOOPBACK_IFINDEX;
		fl6.daddr = *rt6_nexthop((struct rt6_info *)orig_dst,
					 &ip6h->daddr);

		dst = ip6_route_output(net, NULL, &fl6);
		if (dst->error) {
			err = -EHOSTUNREACH;
			dst_release(dst);
			goto drop;
		}

		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto drop;
		}

		if (ilwt->connected)
			dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
	}

	skb_dst_set(skb, dst);
	return dst_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}
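
/* Input path: apply the configured locator translation to the destination
 * address, then hand the packet to the original input handler.
 */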
static int ila_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate), false);

	return dst->lwtstate->orig_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
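
/* Netlink policy for the ILA encap attributes carried on a route. */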
static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
	[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
	[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
};
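
/* Parse and validate the ILA encap attributes for a new route and build the
 * lwtunnel state. The destination prefix must cover the full 64-bit locator
 * plus the 3-bit identifier type field, and must itself be an ILA-typed
 * address with the checksum-neutral bit clear. Such a route is typically
 * installed from user space with something like (assuming an iproute2 build
 * with ILA support):
 *
 *   ip -6 route add <SIR prefix> encap ila <locator> dev <dev>
 */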
static int ila_build_state(struct net_device *dev, struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts)
{
	struct ila_lwt *ilwt;
	struct ila_params *p;
	struct nlattr *tb[ILA_ATTR_MAX + 1];
	struct lwtunnel_state *newts;
	const struct fib6_config *cfg6 = cfg;
	struct ila_addr *iaddr;
	int ret;

	if (family != AF_INET6)
		return -EINVAL;

	if (cfg6->fc_dst_len < 8 * sizeof(struct ila_locator) + 3) {
		/* Need to have full locator and at least type field
		 * included in destination
		 */
		return -EINVAL;
	}

	iaddr = (struct ila_addr *)&cfg6->fc_dst;

	if (!ila_addr_is_ila(iaddr) || ila_csum_neutral_set(iaddr->ident)) {
		/* Don't allow translation for a non-ILA address or checksum
		 * neutral flag to be set.
		 */
		return -EINVAL;
	}

	ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, ila_nl_policy);
	if (ret < 0)
		return ret;

	if (!tb[ILA_ATTR_LOCATOR])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*ilwt));
	if (!newts)
		return -ENOMEM;

	ilwt = ila_lwt_lwtunnel(newts);
	ret = dst_cache_init(&ilwt->dst_cache, GFP_ATOMIC);
	if (ret) {
		kfree(newts);
		return ret;
	}

	p = ila_params_lwtunnel(newts);

	p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);

	/* Precompute checksum difference for translation since we
	 * know both the old locator and the new one.
	 */
	p->locator_match = iaddr->loc;
	p->csum_diff = compute_csum_diff8(
		(__be32 *)&p->locator_match, (__be32 *)&p->locator);

	if (tb[ILA_ATTR_CSUM_MODE])
		p->csum_mode = nla_get_u8(tb[ILA_ATTR_CSUM_MODE]);

	ila_init_saved_csum(p);

	newts->type = LWTUNNEL_ENCAP_ILA;
	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
			LWTUNNEL_STATE_INPUT_REDIRECT;

	if (cfg6->fc_dst_len == 8 * sizeof(struct in6_addr))
		ilwt->connected = 1;

	*ts = newts;

	return 0;
}
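
/* Release per-route state; the only dynamically initialized member is the
 * dst cache.
 */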
static void ila_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&ila_lwt_lwtunnel(lwt)->dst_cache);
}
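
/* Dump the encap parameters (locator and checksum mode) back to user space
 * when the route is listed.
 */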
static int ila_fill_encap_info(struct sk_buff *skb,
			       struct lwtunnel_state *lwtstate)
{
	struct ila_params *p = ila_params_lwtunnel(lwtstate);

	if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64,
			      ILA_ATTR_PAD))
		goto nla_put_failure;
	if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
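
/* Worst-case netlink attribute space needed by ila_fill_encap_info(). */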
static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(sizeof(u64)) + /* ILA_ATTR_LOCATOR */
	       nla_total_size(sizeof(u8)) +        /* ILA_ATTR_CSUM_MODE */
	       0;
}
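
/* Two ILA encap states are equivalent iff they rewrite to the same locator. */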
static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ila_params *a_p = ila_params_lwtunnel(a);
	struct ila_params *b_p = ila_params_lwtunnel(b);

	return (a_p->locator.v64 != b_p->locator.v64);
}
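
/* Operations registered with the lwtunnel core for LWTUNNEL_ENCAP_ILA. */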
static const struct lwtunnel_encap_ops ila_encap_ops = {
	.build_state = ila_build_state,
	.destroy_state = ila_destroy_state,
	.output = ila_output,
	.input = ila_input,
	.fill_encap = ila_fill_encap_info,
	.get_encap_size = ila_encap_nlsize,
	.cmp_encap = ila_encap_cmp,
};
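
/* Module init/exit hooks: register and unregister the ILA encap ops. */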
int ila_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
}

void ila_lwt_fini(void)
{
	lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
}