/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>

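/* This file implements the BPF flavour of lightweight tunnels: BPF programs
 * attached to a route are run on the input, output and/or xmit path of every
 * packet using that route.
 *
 * Illustrative configuration from userspace (iproute2 syntax, shown as a
 * sketch only; exact options depend on the iproute2 version):
 *
 *	ip route add 192.0.2.0/24 encap bpf xmit obj prog.o \
 *		section lwt_xmit dev eth0
 */
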
struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

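/* run_lwt_bpf() executes one attached program and maps its verdict onto the
 * stack's expectations: BPF_OK lets the packet continue, BPF_DROP frees it,
 * and BPF_REDIRECT is honoured only when the caller passes CAN_REDIRECT
 * (the xmit hook).
 */
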
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Preempt disable is needed to protect per-cpu redirect_info between
	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
	 * access to maps strictly require a rcu_read_lock() for protection,
	 * mixing with BH RCU lock doesn't work.
	 */
	preempt_disable();
	rcu_read_lock();
	bpf_compute_data_end(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);
	rcu_read_unlock();

	switch (ret) {
	case BPF_OK:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	preempt_enable();

	return ret;
}

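/* Illustrative only (not part of this file): a minimal program for one of
 * the hooks below, assuming the usual libbpf SEC() conventions; it accepts
 * every packet.
 *
 *	SEC("lwt_in")
 *	int accept_all(struct __sk_buff *skb)
 *	{
 *		return BPF_OK;
 *	}
 */
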
static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		pr_warn_once("orig_input not set on dst for prog %s\n",
			     bpf->in.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}

static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}

static int xmit_check_hhlen(struct sk_buff *skb)
{
	int hh_len = skb_dst(skb)->dev->hard_header_len;

	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}

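/* In bpf_xmit() below, LWTUNNEL_XMIT_CONTINUE hands the skb back to the
 * regular device xmit path, while LWTUNNEL_XMIT_DONE tells the caller the
 * skb has already been consumed (here: redirected by the program).
 */
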
static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header was expanded, headroom might be too
			 * small for L2 header to come, expand as needed.
			 */
			ret = xmit_check_hhlen(skb);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};

static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy,
			       NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};

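/* Expected netlink layout, as enforced by the two policies above: the
 * LWT_BPF_IN/OUT/XMIT attributes each carry a nested LWT_BPF_PROG_FD (u32)
 * plus LWT_BPF_PROG_NAME (NUL-terminated string), and LWT_BPF_XMIT_HEADROOM
 * is a plain u32.
 */
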
static int bpf_build_state(struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size	= bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};

static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)