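# NixOS VM test for the Nebula overlay-network service: a lighthouse (which also
# acts as a relay) and four client nodes with different Nebula firewall policies
# trade certificates over scp, bring up the "smoke" network, and then ping each
# other across the overlay while host-level iptables rules break and restore
# direct connectivity between pairs of nodes.
#
# In a nixpkgs checkout this is typically run as `nix-build -A nixosTests.nebula`
# (attribute name assumed here; it is not stated in this file).
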
import ./make-test-python.nix ({ pkgs, lib, ... }: let

  # We'll need to be able to trade cert files between nodes via scp.
  inherit (import ./ssh-keys.nix pkgs)
    snakeOilPrivateKey snakeOilPublicKey;

  makeNebulaNode = { config, ... }: name: extraConfig: lib.mkMerge [
    {
      # Expose nebula for doing cert signing.
      environment.systemPackages = [ pkgs.nebula ];
      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
      services.openssh.enable = true;
      networking.firewall.enable = true; # Implicitly true, but let's make sure.
      networking.interfaces.eth1.useDHCP = false;

      services.nebula.networks.smoke = {
        # Note that these paths won't exist when the machine is first booted.
        ca = "/etc/nebula/ca.crt";
        cert = "/etc/nebula/${name}.crt";
        key = "/etc/nebula/${name}.key";
        listen = {
          host = "0.0.0.0";
          # Only the lighthouse/relay needs to listen on the well-known port that
          # the other nodes' staticHostMap points at; everyone else takes an
          # ephemeral port (0).
          port = if (config.services.nebula.networks.smoke.isLighthouse
                     || config.services.nebula.networks.smoke.isRelay) then 4242 else 0;
        };
      };
    }
    extraConfig
  ];

in {

  name = "nebula";

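  # The overlay is 10.0.100.0/24: the lighthouse/relay sits at 10.0.100.1 and the
  # clients below get 10.0.100.2-5. The node names describe each client's Nebula
  # firewall policy; "disabled" has the service switched off entirely.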
  nodes = {

    lighthouse = { ... } @ args:
      makeNebulaNode args "lighthouse" {
        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
          address = "192.168.1.1";
          prefixLength = 24;
        }];

        services.nebula.networks.smoke = {
          isLighthouse = true;
          isRelay = true;
          firewall = {
            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
          };
        };
      };

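    # allowAny: an ordinary client; its Nebula firewall accepts inbound traffic
    # from any overlay host.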
    allowAny = { ... } @ args:
      makeNebulaNode args "allowAny" {
        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
          address = "192.168.1.2";
          prefixLength = 24;
        }];

        services.nebula.networks.smoke = {
          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
          isLighthouse = false;
          lighthouses = [ "10.0.100.1" ];
          relays = [ "10.0.100.1" ];
          firewall = {
            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
          };
        };
      };

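    # allowFromLighthouse: only accepts inbound overlay traffic from the host
    # whose certificate was signed with the name "lighthouse".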
    allowFromLighthouse = { ... } @ args:
      makeNebulaNode args "allowFromLighthouse" {
        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
          address = "192.168.1.3";
          prefixLength = 24;
        }];

        services.nebula.networks.smoke = {
          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
          isLighthouse = false;
          lighthouses = [ "10.0.100.1" ];
          relays = [ "10.0.100.1" ];
          firewall = {
            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
            inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
          };
        };
      };

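    # allowToLighthouse: accepts inbound traffic from anyone, but only allows
    # outbound overlay traffic to the lighthouse.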
    allowToLighthouse = { ... } @ args:
      makeNebulaNode args "allowToLighthouse" {
        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
          address = "192.168.1.4";
          prefixLength = 24;
        }];

        services.nebula.networks.smoke = {
          enable = true;
          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
          isLighthouse = false;
          lighthouses = [ "10.0.100.1" ];
          relays = [ "10.0.100.1" ];
          firewall = {
            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
          };
        };
      };

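    # disabled: configured like the other clients but with the nebula service
    # switched off, so it should never join the overlay.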
    disabled = { ... } @ args:
      makeNebulaNode args "disabled" {
        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
          address = "192.168.1.5";
          prefixLength = 24;
        }];

        services.nebula.networks.smoke = {
          enable = false;
          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
          isLighthouse = false;
          lighthouses = [ "10.0.100.1" ];
          relays = [ "10.0.100.1" ];
          firewall = {
            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
          };
        };
      };

  };

  testScript = let

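    # The helpers below are Nix functions returning snippets of Python for the
    # test driver; they get spliced into the final script further down.

    # Boot a node and install the shared snakeoil SSH private key so that cert
    # files can be copied between machines with scp.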
    setUpPrivateKey = name: ''
      ${name}.start()
      ${name}.succeed(
          "mkdir -p /root/.ssh",
          "chmod 700 /root/.ssh",
          "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
          "chmod 600 /root/.ssh/id_snakeoil",
          "mkdir -p /root"
      )
    '';

    # StrictHostKeyChecking=no (plus a throwaway known_hosts file) is needed for
    # non-interactive ssh between the test machines, whose host keys aren't known in advance.
    sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";

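    # Restart a node's Nebula service and check that the given overlay IP
    # (the node's own address in the calls below) answers pings.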
    restartAndCheckNebula = name: ip: ''
      ${name}.systemctl("restart nebula@smoke.service")
      ${name}.succeed("ping -c5 ${ip}")
    '';

    # Create a keypair on the client node, then use the public key to sign a cert on the lighthouse.
    signKeysFor = name: ip: ''
      lighthouse.wait_for_unit("sshd.service")
      ${name}.wait_for_unit("sshd.service")
      ${name}.succeed(
          "mkdir -p /etc/nebula",
          "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
          "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub",
      )
      lighthouse.succeed(
          'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt'
      )
      ${name}.succeed(
          "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt",
          "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
          '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true'
      )
    '';

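    # Produces a Python expression that evaluates to a node's eth1 (underlay)
    # IPv4 address; used below to target iptables DROP rules at specific peers.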
    getPublicIp = node: ''
      ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip()
    '';

    # Never do this for anything security critical! (Thankfully it's just a test.)
    # Restart Nebula right after the mutual block and/or restore so the state is fresh.
    blockTrafficBetween = nodeA: nodeB: ''
      node_a = ${getPublicIp nodeA}
      node_b = ${getPublicIp nodeB}
      ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP")
      ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP")
      ${nodeA}.systemctl("restart nebula@smoke.service")
      ${nodeB}.systemctl("restart nebula@smoke.service")
    '';
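    # The inverse of blockTrafficBetween: delete the DROP rules and restart both
    # Nebula services so their state is fresh again.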
    allowTrafficBetween = nodeA: nodeB: ''
      node_a = ${getPublicIp nodeA}
      node_b = ${getPublicIp nodeB}
      ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP")
      ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP")
      ${nodeA}.systemctl("restart nebula@smoke.service")
      ${nodeB}.systemctl("restart nebula@smoke.service")
    '';
  in ''
    # Create the CA certificate and sign the lighthouse's keys.
    ${setUpPrivateKey "lighthouse"}
    lighthouse.succeed(
        "mkdir -p /etc/nebula",
        'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
        'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
        'chown -R nebula-smoke:nebula-smoke /etc/nebula'
    )

    # Reboot the lighthouse and verify that the nebula service comes up on boot.
    # Since rebooting takes a while, we'll just restart the service on the other nodes.
    lighthouse.shutdown()
    lighthouse.start()
    lighthouse.wait_for_unit("nebula@smoke.service")
    lighthouse.succeed("ping -c5 10.0.100.1")

    # Create keys for allowAny's nebula service and test that it comes up.
    ${setUpPrivateKey "allowAny"}
    ${signKeysFor "allowAny" "10.0.100.2/24"}
    ${restartAndCheckNebula "allowAny" "10.0.100.2"}

    # Create keys for allowFromLighthouse's nebula service and test that it comes up.
    ${setUpPrivateKey "allowFromLighthouse"}
    ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"}
    ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"}

    # Create keys for allowToLighthouse's nebula service and test that it comes up.
    ${setUpPrivateKey "allowToLighthouse"}
    ${signKeysFor "allowToLighthouse" "10.0.100.4/24"}
    ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"}

    # Create keys for disabled's nebula service and test that it does not come up.
    ${setUpPrivateKey "disabled"}
    ${signKeysFor "disabled" "10.0.100.5/24"}
    disabled.fail("systemctl status nebula@smoke.service")
    disabled.fail("ping -c5 10.0.100.5")

    # The lighthouse can ping allowAny and allowFromLighthouse but not disabled.
    lighthouse.succeed("ping -c3 10.0.100.2")
    lighthouse.succeed("ping -c3 10.0.100.3")
    lighthouse.fail("ping -c3 10.0.100.5")

    # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall.
    allowAny.succeed("ping -c3 10.0.100.1")
    allowAny.fail("ping -c3 10.0.100.3")

    # allowFromLighthouse can ping the lighthouse and allowAny.
    allowFromLighthouse.succeed("ping -c3 10.0.100.1")
    allowFromLighthouse.succeed("ping -c3 10.0.100.2")

    # Block direct traffic allowFromLighthouse <-> allowAny; allowFromLighthouse -> allowAny should still work.
    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
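    # Of the two pings above, the first presumably went through the 10.0.100.1
    # relay while the direct path was blocked; the second used the restored direct path.
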
    # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse.
    allowToLighthouse.succeed("ping -c3 10.0.100.1")
    allowToLighthouse.fail("ping -c3 10.0.100.2")
    allowToLighthouse.fail("ping -c3 10.0.100.3")

    # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first.
    allowAny.succeed("ping -c3 10.0.100.3")

    # Block allowAny <-> allowFromLighthouse; allowAny -> allowFromLighthouse should still work.
    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
    allowAny.succeed("ping -c10 10.0.100.3")
    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
    allowAny.succeed("ping -c10 10.0.100.3")

    # allowToLighthouse can ping allowAny if allowAny pings it first.
    allowAny.succeed("ping -c3 10.0.100.4")
    allowToLighthouse.succeed("ping -c3 10.0.100.2")

    # Block allowToLighthouse <-> allowAny; allowAny <-> allowToLighthouse should still work.
    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
    allowAny.succeed("ping -c10 10.0.100.4")
    allowToLighthouse.succeed("ping -c10 10.0.100.2")
    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
    allowAny.succeed("ping -c10 10.0.100.4")
    allowToLighthouse.succeed("ping -c10 10.0.100.2")

    # Block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse;
    # now allowFromLighthouse won't get to allowAny.
    ${blockTrafficBetween "allowFromLighthouse" "lighthouse"}
    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
    allowFromLighthouse.fail("ping -c3 10.0.100.2")
    ${allowTrafficBetween "allowFromLighthouse" "lighthouse"}
    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
    allowFromLighthouse.succeed("ping -c3 10.0.100.2")

    # Block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse;
    # now allowAny won't reach allowFromLighthouse or allowToLighthouse.
    ${blockTrafficBetween "allowAny" "lighthouse"}
    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
    allowFromLighthouse.fail("ping -c3 10.0.100.2")
    allowAny.fail("ping -c3 10.0.100.3")
    allowAny.fail("ping -c3 10.0.100.4")
    ${allowTrafficBetween "allowAny" "lighthouse"}
    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
    allowAny.succeed("ping -c3 10.0.100.3")
    allowAny.succeed("ping -c3 10.0.100.4")

    # Block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny;
    # now allowToLighthouse won't reach allowAny.
    ${blockTrafficBetween "allowToLighthouse" "lighthouse"}
    ${blockTrafficBetween "allowToLighthouse" "allowAny"}
    allowAny.fail("ping -c3 10.0.100.4")
    allowToLighthouse.fail("ping -c3 10.0.100.2")
    ${allowTrafficBetween "allowToLighthouse" "lighthouse"}
    ${allowTrafficBetween "allowToLighthouse" "allowAny"}
    allowAny.succeed("ping -c3 10.0.100.4")
    allowToLighthouse.succeed("ping -c3 10.0.100.2")
  '';
})