#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>

#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF
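
/*
 * Socket-filter example: dissect each packet down to its innermost
 * IPv4/IPv6 header (skipping VLAN tags and GRE/IPIP tunnel encapsulation),
 * then count packets and bytes per destination in a hash map that user
 * space can read.
 */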

/* minimal VLAN tag layout, used only for offsetof()/sizeof() below */
struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* flow tuple produced by the dissector */
struct flow_key_record {
	__be32 src;
	__be32 dst;
	union {
		__be32 ports;
		__be16 port16[2];
	};
	__u16 thoff;
	__u32 ip_proto;
};

/* offset of the 32-bit word recorded as flow->ports, or -1 if none */
static inline int proto_ports_offset(__u64 proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		return 0;
	case IPPROTO_AH:
		return 4;
	default:
		return -1;
	}
}

/* true if this IPv4 packet is a fragment (MF set or non-zero offset) */
static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
		& (IP_MF | IP_OFFSET);
}

static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
	__u64 w0 = load_word(ctx, off);
	__u64 w1 = load_word(ctx, off + 4);
	__u64 w2 = load_word(ctx, off + 8);
	__u64 w3 = load_word(ctx, off + 12);

	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}

static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
			     struct flow_key_record *flow)
{
	__u64 verlen;

	/* fragments carry no L4 header we can trust, so record no protocol */
	if (unlikely(ip_is_fragment(skb, nhoff)))
		*ip_proto = 0;
	else
		*ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));

	if (*ip_proto != IPPROTO_GRE) {
		flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
		flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
	}

	/* 0x45 is version 4 with a 5-word (20-byte) header; otherwise honour IHL */
	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
	if (likely(verlen == 0x45))
		nhoff += 20;
	else
		nhoff += (verlen & 0xF) << 2;

	return nhoff;
}

static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
			       struct flow_key_record *flow)
{
	*ip_proto = load_byte(skb,
			      nhoff + offsetof(struct ipv6hdr, nexthdr));
	/* hash the 128-bit addresses down to the 32-bit src/dst fields */
	flow->src = ipv6_addr_hash(skb,
				   nhoff + offsetof(struct ipv6hdr, saddr));
	flow->dst = ipv6_addr_hash(skb,
				   nhoff + offsetof(struct ipv6hdr, daddr));
	nhoff += sizeof(struct ipv6hdr);

	return nhoff;
}

static inline bool flow_dissector(struct __sk_buff *skb,
				  struct flow_key_record *flow)
{
	__u64 nhoff = ETH_HLEN;
	__u64 ip_proto;
	__u64 proto = load_half(skb, 12); /* EtherType */
	int poff;

	/* skip an outer 802.1ad (QinQ) tag, if present */
	if (proto == ETH_P_8021AD) {
		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
							h_vlan_encapsulated_proto));
		nhoff += sizeof(struct vlan_hdr);
	}

	/* skip an 802.1Q VLAN tag, if present */
	if (proto == ETH_P_8021Q) {
		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
							h_vlan_encapsulated_proto));
		nhoff += sizeof(struct vlan_hdr);
	}

	if (likely(proto == ETH_P_IP))
		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
	else if (proto == ETH_P_IPV6)
		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
	else
		return false;

	/* peel one level of tunnel encapsulation and re-parse the inner header */
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		};

		__u64 gre_flags = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, flags));
		__u64 gre_proto = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, proto));

		/* only plain GRE (version 0, no routing) is handled */
		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
			break;

		proto = gre_proto;
		nhoff += 4;
		/* skip the optional checksum, key and sequence fields */
		if (gre_flags & GRE_CSUM)
			nhoff += 4;
		if (gre_flags & GRE_KEY)
			nhoff += 4;
		if (gre_flags & GRE_SEQ)
			nhoff += 4;

		if (proto == ETH_P_8021Q) {
			proto = load_half(skb,
					  nhoff + offsetof(struct vlan_hdr,
							   h_vlan_encapsulated_proto));
			nhoff += sizeof(struct vlan_hdr);
		}

		if (proto == ETH_P_IP)
			nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
		else if (proto == ETH_P_IPV6)
			nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
		else
			return false;
		break;
	}
	case IPPROTO_IPIP:
		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
		break;
	case IPPROTO_IPV6:
		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
		break;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += poff;
		flow->ports = load_word(skb, nhoff);
	}

	flow->thoff = (__u16) nhoff;

	return true;
}
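
/*
 * Per-destination counters: the program below bumps packets/bytes for
 * each dissected flow, and user space reads the totals from hash_map.
 */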
struct pair {
	long packets;
	long bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __be32);
	__type(value, struct pair);
	__uint(max_entries, 1024);
} hash_map SEC(".maps");

SEC("socket")
int bpf_prog2(struct __sk_buff *skb)
{
	struct flow_key_record flow = {};
	struct pair *value;
	__be32 key;

	if (!flow_dissector(skb, &flow))
		return 0;

	/* aggregate per destination address (or per IPv6 address hash) */
	key = flow.dst;
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value) {
		__sync_fetch_and_add(&value->packets, 1);
		__sync_fetch_and_add(&value->bytes, skb->len);
	} else {
		struct pair val = {1, skb->len};

		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";
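
/*
 * Minimal user-space sketch (not part of this object file), assuming the
 * program above is built into "sockex2_kern.o" and libbpf is available:
 * open and load the object, attach bpf_prog2 to a raw packet socket with
 * SO_ATTACH_BPF, then walk hash_map to read the counters.
 *
 *	struct bpf_object *obj = bpf_object__open_file("sockex2_kern.o", NULL);
 *	bpf_object__load(obj);
 *	int prog_fd = bpf_program__fd(
 *		bpf_object__find_program_by_name(obj, "bpf_prog2"));
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map");
 *
 *	int sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC,
 *			  htons(ETH_P_ALL));
 *	setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd));
 *
 *	__be32 key = 0, next_key;
 *	struct pair p;
 *	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
 *		bpf_map_lookup_elem(map_fd, &next_key, &p);
 *		// next_key is the destination, p holds packet/byte totals
 *		key = next_key;
 *	}
 */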