# nixos/tests/rke2/multi-node.nix
# (extracted from nixpkgs; original web-view header: "vuls: init at 0.27.0",
#  blob ddf0b60f6fba476a3edefb155ffd2cbfb9c129fc)
# NixOS VM test: a three-node RKE2 cluster (two servers, one agent).
# Verifies that nodes join, pods are scheduled on every node via a daemonset,
# and cross-node pod networking works (ping + TCP via socat).
import ../make-test-python.nix ({ pkgs, lib, rke2, ... }:
  let
    # Minimal local container image (tini + sleep entrypoint, socat available)
    # streamed into each node's containerd store, so the test needs no
    # registry access (imagePullPolicy: Never below).
    pauseImage = pkgs.dockerTools.streamLayeredImage {
      name = "test.local/pause";
      tag = "local";
      contents = pkgs.buildEnv {
        name = "rke2-pause-image-env";
        paths = with pkgs; [ tini bashInteractive coreutils socat ];
      };
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    # A daemonset that responds 'server' on port 8000
    networkTestDaemonset = pkgs.writeText "test.yml" ''
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: test
        labels:
          name: test
      spec:
        selector:
          matchLabels:
            name: test
        template:
          metadata:
            labels:
              name: test
          spec:
            containers:
            - name: test
              image: test.local/pause:local
              imagePullPolicy: Never
              resources:
                limits:
                  memory: 20Mi
              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
    '';
    # Shared join secrets: servers use tokenFile, agents join with the
    # (separate) agent token.
    tokenFile = pkgs.writeText "token" "p@s$w0rd";
    agentTokenFile = pkgs.writeText "agent-token" "p@s$w0rd";
  in
  {
    name = "${rke2.name}-multi-node";
    meta.maintainers = rke2.meta.maintainers;

    nodes = {
      # First server: bootstraps the cluster (hence --cluster-reset); the
      # other nodes point their serverAddr at an already-running member.
      # NOTE(review): each node's defaultGateway equals its own address —
      # looks intentional for the isolated test VLAN, but worth confirming.
      server1 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.1";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.1"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "server";
          inherit tokenFile;
          inherit agentTokenFile;
          nodeName = "${rke2.name}-server1";
          package = rke2;
          nodeIP = "192.168.1.1";
          # Disable bundled addons the test does not need, to keep the VMs
          # within their memory budget.
          disable = [
            "rke2-coredns"
            "rke2-metrics-server"
            "rke2-ingress-nginx"
          ];
          extraFlags = [
            "--cluster-reset"
          ];
        };
      };

      # Second server: joins the cluster through server1.
      server2 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.2";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.2"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "server";
          serverAddr = "https://192.168.1.1:6443";
          inherit tokenFile;
          inherit agentTokenFile;
          nodeName = "${rke2.name}-server2";
          package = rke2;
          nodeIP = "192.168.1.2";
          disable = [
            "rke2-coredns"
            "rke2-metrics-server"
            "rke2-ingress-nginx"
          ];
        };
      };

      # Agent: joins via server2 using the agent token, exercising the
      # separate agent-token code path.
      agent1 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.3";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.3"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "agent";
          tokenFile = agentTokenFile;
          serverAddr = "https://192.168.1.2:6443";
          nodeName = "${rke2.name}-agent1";
          package = rke2;
          nodeIP = "192.168.1.3";
        };
      };
    };

    # Python driver script. Tool paths are interpolated from the Nix store;
    # kubectl talks to RKE2's generated kubeconfig, ctr to its containerd
    # socket.
    testScript = let
      kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
      ctr = "${pkgs.containerd}/bin/ctr -a /run/k3s/containerd/containerd.sock";
      jq = "${pkgs.jq}/bin/jq";
      ping = "${pkgs.iputils}/bin/ping";
    in ''
      machines = [server1, server2, agent1]

      for machine in machines:
          machine.start()
          machine.wait_for_unit("rke2")

      # wait for the agent to show up
      server1.succeed("${kubectl} get node ${rke2.name}-agent1")

      for machine in machines:
          machine.succeed("${pauseImage} | ${ctr} image import -")

      server1.succeed("${kubectl} cluster-info")
      server1.wait_until_succeeds("${kubectl} get serviceaccount default")

      # Now create a pod on each node via a daemonset and verify they can talk to each other.
      server1.succeed("${kubectl} apply -f ${networkTestDaemonset}")
      server1.wait_until_succeeds(
          f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
      )

      # Get pod IPs
      pods = server1.succeed("${kubectl} get po -o json | ${jq} '.items[].metadata.name' -r").splitlines()
      pod_ips = [
          server1.succeed(f"${kubectl} get po {n} -o json | ${jq} '.status.podIP' -cr").strip() for n in pods
      ]

      # Verify each server can ping each pod ip
      for pod_ip in pod_ips:
          server1.succeed(f"${ping} -c 1 {pod_ip}")
          agent1.succeed(f"${ping} -c 1 {pod_ip}")

      # Verify the pods can talk to each other
      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
      assert resp.strip() == "server"
      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
      assert resp.strip() == "server"

      # Cleanup
      server1.succeed("${kubectl} delete -f ${networkTestDaemonset}")
      for machine in machines:
          machine.shutdown()
    '';
  })