/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>
#include <uapi/linux/mpls.h>

#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF

/* each parser stage below is emitted as its own "socket/<slot>" program */
#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
struct bpf_map_def SEC("maps") jmp_table = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),
	.max_entries = 8,
};

#define PARSE_VLAN 1
#define PARSE_MPLS 2
#define PARSE_IP 3
#define PARSE_IPV6 4
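/* User space loads each bpf_func_* program and installs its fd into
 * jmp_table at the matching PARSE_* slot (e.g. via bpf_map_update_elem()
 * in the loader); only main_prog is attached to the socket directly.
 * The slot values are arbitrary as long as the loader uses the same ones.
 */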
/* protocol dispatch routine.
 * It tail-calls the next BPF program depending on eth proto.
 * Note, we could have used:
 * bpf_tail_call(skb, &jmp_table, proto);
 * but it would need a large prog_array
 */
static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
{
	switch (proto) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
		break;
	case ETH_P_MPLS_UC:
	case ETH_P_MPLS_MC:
		bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
		break;
	case ETH_P_IP:
		bpf_tail_call(skb, &jmp_table, PARSE_IP);
		break;
	case ETH_P_IPV6:
		bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
		break;
	}
}
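/* The stages communicate through skb->cb[0]: each program reads the
 * current header offset from there, advances it past its own header and
 * writes it back before tail-calling the next stage.
 */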
struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

struct flow_key_record {
	__be32 src;
	__be32 dst;
	union {
		__be32 ports;
		__be16 port16[2];
	};
	__u32 ip_proto;
};
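/* A packet is a fragment if the More-Fragments bit is set or the fragment
 * offset is non-zero; such packets are skipped by PARSE_IP.
 */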
static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
		& (IP_MF | IP_OFFSET);
}
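/* The 128-bit IPv6 addresses are folded into 32 bits by XOR so they fit
 * the same flow_key_record fields that hold IPv4 addresses.
 */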
static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
	__u64 w0 = load_word(ctx, off);
	__u64 w1 = load_word(ctx, off + 4);
	__u64 w2 = load_word(ctx, off + 8);
	__u64 w3 = load_word(ctx, off + 12);

	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}
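/* Parser state has to survive bpf_tail_call(), which passes nothing but
 * the context, so the in-progress flow key is kept in a per-CPU array
 * slot rather than on the stack.
 */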
struct globals {
	struct flow_key_record flow;
};

struct bpf_map_def SEC("maps") percpu_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct globals),
	.max_entries = 32, /* indexed by CPU id, must cover all possible CPUs */
};
/* poor man's per-CPU storage until native per-CPU maps are available */
static struct globals *this_cpu_globals(void)
{
	u32 key = bpf_get_smp_processor_id();

	return bpf_map_lookup_elem(&percpu_map, &key);
}
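/* bpf_map_lookup_elem() may return NULL (e.g. for a CPU id beyond
 * max_entries), so callers must check the result before dereferencing it.
 */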
/* some simple stats for user space consumption */
struct pair {
	__u64 packets;
	__u64 bytes;
};

struct bpf_map_def SEC("maps") hash_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(struct flow_key_record),
	.value_size = sizeof(struct pair),
	.max_entries = 1024,
};
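/* Bump the per-flow counters, creating the hash_map entry on first use. */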
static void update_stats(struct __sk_buff *skb, struct globals *g)
{
	struct flow_key_record key = g->flow;
	struct pair *value;

	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value) {
		__sync_fetch_and_add(&value->packets, 1);
		__sync_fetch_and_add(&value->bytes, skb->len);
	} else {
		struct pair val = {1, skb->len};

		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
	}
}
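/* Dispatch on the protocol carried inside the IP header: tunnels (GRE,
 * IPIP, IPv6-in-IP) feed the inner protocol back into parse_eth_proto(),
 * while TCP/UDP record the port pair and update the flow stats.
 */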
static __always_inline void parse_ip_proto(struct __sk_buff *skb,
					   struct globals *g, __u32 ip_proto)
{
	__u32 nhoff = skb->cb[0];

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		};

		__u32 gre_flags = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, flags));
		__u32 gre_proto = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, proto));

		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
			break;

		nhoff += 4; /* skip the base GRE header */
		if (gre_flags & GRE_CSUM)
			nhoff += 4;
		if (gre_flags & GRE_KEY)
			nhoff += 4;
		if (gre_flags & GRE_SEQ)
			nhoff += 4;

		skb->cb[0] = nhoff;
		parse_eth_proto(skb, gre_proto);
		break;
	}
	case IPPROTO_IPIP:
		parse_eth_proto(skb, ETH_P_IP);
		break;
	case IPPROTO_IPV6:
		parse_eth_proto(skb, ETH_P_IPV6);
		break;
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		g->flow.ports = load_word(skb, nhoff);
		/* fall through to record the protocol and stats */
	case IPPROTO_ICMP:
		g->flow.ip_proto = ip_proto;
		update_stats(skb, g);
		break;
	default:
		break;
	}
}
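/* The parsers below are tail-call targets: PROG(F) places each one in its
 * own "socket/<slot>" ELF section so the loader can install it into
 * jmp_table at slot F.
 */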
PROG(PARSE_IP)(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, verlen, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	if (unlikely(ip_is_fragment(skb, nhoff)))
		return 0;

	ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));

	if (ip_proto != IPPROTO_GRE) {
		g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
		g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
	}

	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
	nhoff += (verlen & 0xF) << 2;

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}
PROG(PARSE_IPV6)(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	ip_proto = load_byte(skb,
			     nhoff + offsetof(struct ipv6hdr, nexthdr));
	g->flow.src = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, saddr));
	g->flow.dst = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, daddr));
	nhoff += sizeof(struct ipv6hdr);

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}
PROG(PARSE_VLAN)(struct __sk_buff *skb)
{
	__u32 nhoff, proto;

	nhoff = skb->cb[0];

	proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
						h_vlan_encapsulated_proto));
	nhoff += sizeof(struct vlan_hdr);
	skb->cb[0] = nhoff;

	parse_eth_proto(skb, proto);

	return 0;
}
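/* Stacked VLAN tags (QinQ) simply come back through parse_eth_proto(),
 * which tail-calls PARSE_VLAN again for the inner tag.
 */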
PROG(PARSE_MPLS)(struct __sk_buff *skb)
{
	__u32 nhoff, label;

	nhoff = skb->cb[0];

	label = load_word(skb, nhoff);
	nhoff += sizeof(struct mpls_label);
	skb->cb[0] = nhoff;

	if (label & MPLS_LS_S_MASK) {
		/* bottom of stack: peek at the IP version nibble of the payload */
		__u8 verlen = load_byte(skb, nhoff);

		if ((verlen & 0xF0) == 0x40)
			parse_eth_proto(skb, ETH_P_IP);
		else
			parse_eth_proto(skb, ETH_P_IPV6);
	} else {
		parse_eth_proto(skb, ETH_P_MPLS_UC);
	}

	return 0;
}
SEC("socket/0")
int main_prog(struct __sk_buff *skb)
{
	__u32 nhoff = ETH_HLEN;
	__u32 proto = load_half(skb, 12); /* ethertype at byte offset 12 */

	skb->cb[0] = nhoff;
	parse_eth_proto(skb, proto);
	return 0;
}
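/* Rough user-space flow (the real loader lives in the accompanying
 * sockex3 user program; the calls below are only a sketch of the intended
 * usage, not verbatim code):
 *
 *   - load this object and take the fd of each bpf_func_PARSE_* program;
 *   - bpf_map_update_elem(jmp_table_fd, &slot, &prog_fd, BPF_ANY) for each
 *     PARSE_* slot, so bpf_tail_call() can find the parser stages;
 *   - attach main_prog to a packet socket with
 *     setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &main_prog_fd, sizeof(int));
 *   - periodically iterate hash_map to report per-flow packets/bytes.
 */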
char _license[] SEC("license") = "GPL";