#include <uapi/linux/bpf.h>
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>
#include "bpf_helpers.h"
10 #define IP_OFFSET 0x1FFF
14 __be16 h_vlan_encapsulated_proto
;
28 static inline int proto_ports_offset(__u64 proto
)
45 static inline int ip_is_fragment(struct __sk_buff
*ctx
, __u64 nhoff
)
47 return load_half(ctx
, nhoff
+ offsetof(struct iphdr
, frag_off
))
48 & (IP_MF
| IP_OFFSET
);
51 static inline __u32
ipv6_addr_hash(struct __sk_buff
*ctx
, __u64 off
)
53 __u64 w0
= load_word(ctx
, off
);
54 __u64 w1
= load_word(ctx
, off
+ 4);
55 __u64 w2
= load_word(ctx
, off
+ 8);
56 __u64 w3
= load_word(ctx
, off
+ 12);
58 return (__u32
)(w0
^ w1
^ w2
^ w3
);
61 static inline __u64
parse_ip(struct __sk_buff
*skb
, __u64 nhoff
, __u64
*ip_proto
,
62 struct flow_keys
*flow
)
66 if (unlikely(ip_is_fragment(skb
, nhoff
)))
69 *ip_proto
= load_byte(skb
, nhoff
+ offsetof(struct iphdr
, protocol
));
71 if (*ip_proto
!= IPPROTO_GRE
) {
72 flow
->src
= load_word(skb
, nhoff
+ offsetof(struct iphdr
, saddr
));
73 flow
->dst
= load_word(skb
, nhoff
+ offsetof(struct iphdr
, daddr
));
76 verlen
= load_byte(skb
, nhoff
+ 0/*offsetof(struct iphdr, ihl)*/);
77 if (likely(verlen
== 0x45))
80 nhoff
+= (verlen
& 0xF) << 2;
85 static inline __u64
parse_ipv6(struct __sk_buff
*skb
, __u64 nhoff
, __u64
*ip_proto
,
86 struct flow_keys
*flow
)
88 *ip_proto
= load_byte(skb
,
89 nhoff
+ offsetof(struct ipv6hdr
, nexthdr
));
90 flow
->src
= ipv6_addr_hash(skb
,
91 nhoff
+ offsetof(struct ipv6hdr
, saddr
));
92 flow
->dst
= ipv6_addr_hash(skb
,
93 nhoff
+ offsetof(struct ipv6hdr
, daddr
));
94 nhoff
+= sizeof(struct ipv6hdr
);
99 static inline bool flow_dissector(struct __sk_buff
*skb
, struct flow_keys
*flow
)
101 __u64 nhoff
= ETH_HLEN
;
103 __u64 proto
= load_half(skb
, 12);
106 if (proto
== ETH_P_8021AD
) {
107 proto
= load_half(skb
, nhoff
+ offsetof(struct vlan_hdr
,
108 h_vlan_encapsulated_proto
));
109 nhoff
+= sizeof(struct vlan_hdr
);
112 if (proto
== ETH_P_8021Q
) {
113 proto
= load_half(skb
, nhoff
+ offsetof(struct vlan_hdr
,
114 h_vlan_encapsulated_proto
));
115 nhoff
+= sizeof(struct vlan_hdr
);
118 if (likely(proto
== ETH_P_IP
))
119 nhoff
= parse_ip(skb
, nhoff
, &ip_proto
, flow
);
120 else if (proto
== ETH_P_IPV6
)
121 nhoff
= parse_ipv6(skb
, nhoff
, &ip_proto
, flow
);
132 __u64 gre_flags
= load_half(skb
,
133 nhoff
+ offsetof(struct gre_hdr
, flags
));
134 __u64 gre_proto
= load_half(skb
,
135 nhoff
+ offsetof(struct gre_hdr
, proto
));
137 if (gre_flags
& (GRE_VERSION
|GRE_ROUTING
))
142 if (gre_flags
& GRE_CSUM
)
144 if (gre_flags
& GRE_KEY
)
146 if (gre_flags
& GRE_SEQ
)
149 if (proto
== ETH_P_8021Q
) {
150 proto
= load_half(skb
,
151 nhoff
+ offsetof(struct vlan_hdr
,
152 h_vlan_encapsulated_proto
));
153 nhoff
+= sizeof(struct vlan_hdr
);
156 if (proto
== ETH_P_IP
)
157 nhoff
= parse_ip(skb
, nhoff
, &ip_proto
, flow
);
158 else if (proto
== ETH_P_IPV6
)
159 nhoff
= parse_ipv6(skb
, nhoff
, &ip_proto
, flow
);
165 nhoff
= parse_ip(skb
, nhoff
, &ip_proto
, flow
);
168 nhoff
= parse_ipv6(skb
, nhoff
, &ip_proto
, flow
);
174 flow
->ip_proto
= ip_proto
;
175 poff
= proto_ports_offset(ip_proto
);
178 flow
->ports
= load_word(skb
, nhoff
);
181 flow
->thoff
= (__u16
) nhoff
;
191 struct bpf_map_def
SEC("maps") hash_map
= {
192 .type
= BPF_MAP_TYPE_HASH
,
193 .key_size
= sizeof(__be32
),
194 .value_size
= sizeof(struct pair
),
199 int bpf_prog2(struct __sk_buff
*skb
)
201 struct flow_keys flow
;
205 if (!flow_dissector(skb
, &flow
))
209 value
= bpf_map_lookup_elem(&hash_map
, &key
);
211 __sync_fetch_and_add(&value
->packets
, 1);
212 __sync_fetch_and_add(&value
->bytes
, skb
->len
);
214 struct pair val
= {1, skb
->len
};
216 bpf_map_update_elem(&hash_map
, &key
, &val
, BPF_ANY
);
221 char _license
[] SEC("license") = "GPL";