/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
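
/* Lightweight tunnel (LWT) encapsulation that runs BPF programs on the
 * input, output and/or xmit path of a route. Programs are attached per
 * route over netlink, typically via iproute2. Illustrative invocation
 * (object file and section names are examples; exact syntax depends on
 * the iproute2 version):
 *
 *   ip route add 192.168.253.0/24 encap bpf xmit obj lwt_prog.o \
 *           section xmit dev veth0
 */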

struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true
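
/* Run one attached BPF program on skb and map its verdict to an error
 * code: BPF_OK lets the packet continue, BPF_DROP frees it and returns
 * -EPERM, and BPF_REDIRECT hands it to skb_do_redirect() (only honoured
 * when the caller passes CAN_REDIRECT, i.e. on the xmit path).
 */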

static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Preempt disable is needed to protect per-cpu redirect_info between
	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
	 * access to maps strictly require a rcu_read_lock() for protection,
	 * mixing with BH RCU lock doesn't work.
	 */
	preempt_disable();
	rcu_read_lock();
	bpf_compute_data_end(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);
	rcu_read_unlock();

	switch (ret) {
	case BPF_OK:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	preempt_enable();

	return ret;
}
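
/* Input hook installed via the route's lwtstate: run the LWT_IN program
 * (redirects are not allowed at this stage) and then hand the skb back
 * to the original protocol input handler saved in lwtstate->orig_input.
 */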

static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		pr_warn_once("orig_input not set on dst for prog %s\n",
			     bpf->in.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}
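
/* Output hook: same as bpf_input but on the output path for locally
 * generated or forwarded packets, chaining to lwtstate->orig_output.
 */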

static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}
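
/* The xmit program may have grown the packet (e.g. pushed an outer
 * header); make sure there is still room for the device's L2 header.
 */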

static int xmit_check_hhlen(struct sk_buff *skb)
{
	int hh_len = skb_dst(skb)->dev->hard_header_len;

	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}
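
/* Xmit hook: the only stage where BPF_REDIRECT is honoured. A redirect
 * means the program already forwarded the skb, so report
 * LWTUNNEL_XMIT_DONE; otherwise continue with the regular xmit path.
 */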

static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header was expanded, headroom might be too
			 * small for L2 header to come, expand as needed.
			 */
			ret = xmit_check_hhlen(skb);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}
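
/* Drop the reference taken by bpf_prog_get_type() and free the
 * duplicated program name.
 */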

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};
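
/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute. Both the program fd
 * and its name are mandatory; the fd is resolved to a bpf_prog of the
 * expected type and the reference is held until bpf_lwt_prog_destroy().
 */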

static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy,
			       NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};
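
/* build_state callback: validate the encap attributes, allocate the
 * lwtunnel_state with a trailing struct bpf_lwt and attach up to three
 * programs. At least one of LWT_BPF_IN/OUT/XMIT must be present.
 */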

static int bpf_build_state(struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}
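
/* Dump helper: emit one nested attribute per attached program. Only the
 * program name is reported; the fd used at configuration time has no
 * meaning outside the process that installed the route.
 */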

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}
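
/* Encap ops registered with the lwtunnel core; .owner lets the core pin
 * this module while routes still reference the BPF encap type.
 */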

static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size	= bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};

static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)