// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Cloudflare Ltd.
// Copyright (c) 2020 Isovalent, Inc.

#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2

/* Must match struct bpf_elf_map layout from iproute2 */
struct {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
	__u32 flags;
	__u32 id;
	__u32 pinning;
} server_map SEC("maps") = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.size_key = sizeof(int),
	.size_value = sizeof(__u64),
	.max_elem = 1,
	.pinning = PIN_GLOBAL_NS,
};

int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";

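/* The user-space side of the test is assumed to attach this program with tc,
 * which pins server_map under /sys/fs/bpf/tc/globals/server_map (see
 * PIN_GLOBAL_NS above), and to store the server socket's fd at key 0, roughly:
 *
 *	int zero = 0;
 *	int map_fd = bpf_obj_get("/sys/fs/bpf/tc/globals/server_map");
 *	bpf_map_update_elem(map_fd, &zero, &server_fd, BPF_ANY);
 *
 * (Sketch only; server_fd stands for the harness's server socket and is not
 * defined in this file.)
 */
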
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
static inline struct bpf_sock_tuple *
get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct bpf_sock_tuple *result;
	struct ethhdr *eth;
	__u8 proto = 0;
	__u64 ihl_len;

	eth = (struct ethhdr *)(data);
	if (eth + 1 > data_end)
		return NULL;

	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));

		if (iph + 1 > data_end)
			return NULL;
		if (iph->ihl != 5)
			/* Options are not supported */
			return NULL;
		ihl_len = iph->ihl * 4;
		proto = iph->protocol;
		*ipv4 = true;
		result = (struct bpf_sock_tuple *)&iph->saddr;
	} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));

		if (ip6h + 1 > data_end)
			return NULL;
		ihl_len = sizeof(*ip6h);
		proto = ip6h->nexthdr;
		*ipv4 = false;
		result = (struct bpf_sock_tuple *)&ip6h->saddr;
	} else {
		/* Not IPv4/IPv6: hand back the start of the packet. */
		return (struct bpf_sock_tuple *)data;
	}

	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return NULL;

	*tcp = (proto == IPPROTO_TCP);
	return result;
}

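/* Look up a UDP socket matching the packet's tuple; if there is none and the
 * packet is destined to port 4321, assign the packet to the server socket
 * stored in server_map instead.
 */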
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
	struct bpf_sock_tuple ln = {0};
	struct bpf_sock *sk;
	const int zero = 0;
	size_t tuple_len;
	__be16 dport;
	int ret;

	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
	/* Make sure the full tuple lies within the packet. */
	if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
		return TC_ACT_SHOT;

	sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
	if (sk)
		goto assign;

	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
	if (dport != bpf_htons(4321))
		return TC_ACT_OK;

	sk = bpf_map_lookup_elem(&server_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

assign:
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return ret;
}

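/* TCP variant: prefer a socket already matching the tuple. Otherwise, for
 * packets destined to port 4321, fall back to the (listening) server socket
 * stored in server_map.
 */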
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
	struct bpf_sock_tuple ln = {0};
	struct bpf_sock *sk;
	const int zero = 0;
	size_t tuple_len;
	__be16 dport;
	int ret;

	tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
	if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
		return TC_ACT_SHOT;

	sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
	if (sk) {
		/* A non-listening socket matches the tuple: assign to it.
		 * Listeners are released and we fall through to the
		 * map-based path below.
		 */
		if (sk->state != BPF_TCP_LISTEN)
			goto assign;
		bpf_sk_release(sk);
	}

	dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
	if (dport != bpf_htons(4321))
		return TC_ACT_OK;

	sk = bpf_map_lookup_elem(&server_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

	/* The socket taken from the map must be a listener. */
	if (sk->state != BPF_TCP_LISTEN) {
		bpf_sk_release(sk);
		return TC_ACT_SHOT;
	}

assign:
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return ret;
}

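/* TC classifier entry point: parse the packet and dispatch to the TCP or UDP
 * handler. The packet is passed (TC_ACT_OK) when assignment succeeded or was
 * not needed, and dropped (TC_ACT_SHOT) on any error.
 */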
SEC("classifier/sk_assign_test")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
	struct bpf_sock_tuple *tuple, ln = {0};
	bool ipv4 = false;
	bool tcp = false;
	int ret = 0;

	tuple = get_tuple(skb, &ipv4, &tcp);
	if (!tuple)
		return TC_ACT_SHOT;

	/* Note that the verifier socket return type for bpf_skc_lookup_tcp()
	 * differs from bpf_sk_lookup_udp(), so even though the C-level type is
	 * the same here, if we try to share the implementations they will
	 * fail to verify because we're crossing pointer types.
	 */
	if (tcp)
		ret = handle_tcp(skb, tuple, ipv4);
	else
		ret = handle_udp(skb, tuple, ipv4);

	return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}