parinfer-rust: useFetchCargoVendor
[NixPkgs.git] / nixos / tests / k3s / multi-node.nix
blobc1d89af2f3e2214b6ea4966f998f4dd54688be43
1 # A test that runs a multi-node k3s cluster and verify pod networking works across nodes
2 import ../make-test-python.nix (
3   {
4     pkgs,
5     lib,
6     k3s,
7     ...
8   }:
9   let
10     imageEnv = pkgs.buildEnv {
11       name = "k3s-pause-image-env";
12       paths = with pkgs; [
13         tini
14         bashInteractive
15         coreutils
16         socat
17       ];
18     };
19     pauseImage = pkgs.dockerTools.buildImage {
20       name = "test.local/pause";
21       tag = "local";
22       copyToRoot = imageEnv;
23       config.Entrypoint = [
24         "/bin/tini"
25         "--"
26         "/bin/sleep"
27         "inf"
28       ];
29     };
    # A DaemonSet that puts one pod on every node; each pod uses socat to
    # answer the string "server" on TCP port 8000. The test script deploys
    # it and probes these endpoints to verify cross-node pod networking.
    networkTestDaemonset = pkgs.writeText "test.yml" ''
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: test
        labels:
          name: test
      spec:
        selector:
          matchLabels:
            name: test
        template:
          metadata:
            labels:
              name: test
          spec:
            containers:
            - name: test
              image: test.local/pause:local
              imagePullPolicy: Never
              resources:
                limits:
                  memory: 20Mi
              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
    '';
    # Shared cluster-join token; every node's services.k3s.tokenFile points here.
    tokenFile = pkgs.writeText "token" "p@s$w0rd";
57   in
58   {
59     name = "${k3s.name}-multi-node";
61     nodes = {
62       server =
63         { pkgs, ... }:
64         {
65           environment.systemPackages = with pkgs; [
66             gzip
67             jq
68           ];
69           # k3s uses enough resources the default vm fails.
70           virtualisation.memorySize = 1536;
71           virtualisation.diskSize = 4096;
73           services.k3s = {
74             inherit tokenFile;
75             enable = true;
76             role = "server";
77             package = k3s;
78             images = [ pauseImage ];
79             clusterInit = true;
80             extraFlags = [
81               "--disable coredns"
82               "--disable local-storage"
83               "--disable metrics-server"
84               "--disable servicelb"
85               "--disable traefik"
86               "--node-ip 192.168.1.1"
87               "--pause-image test.local/pause:local"
88             ];
89           };
90           networking.firewall.allowedTCPPorts = [
91             2379
92             2380
93             6443
94           ];
95           networking.firewall.allowedUDPPorts = [ 8472 ];
96           networking.firewall.trustedInterfaces = [ "flannel.1" ];
97           networking.useDHCP = false;
98           networking.defaultGateway = "192.168.1.1";
99           networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
100             {
101               address = "192.168.1.1";
102               prefixLength = 24;
103             }
104           ];
105         };
107       server2 =
108         { pkgs, ... }:
109         {
110           environment.systemPackages = with pkgs; [
111             gzip
112             jq
113           ];
114           virtualisation.memorySize = 1536;
115           virtualisation.diskSize = 4096;
117           services.k3s = {
118             inherit tokenFile;
119             enable = true;
120             package = k3s;
121             images = [ pauseImage ];
122             serverAddr = "https://192.168.1.1:6443";
123             clusterInit = false;
124             extraFlags = [
125               "--disable coredns"
126               "--disable local-storage"
127               "--disable metrics-server"
128               "--disable servicelb"
129               "--disable traefik"
130               "--node-ip 192.168.1.3"
131               "--pause-image test.local/pause:local"
132             ];
133           };
134           networking.firewall.allowedTCPPorts = [
135             2379
136             2380
137             6443
138           ];
139           networking.firewall.allowedUDPPorts = [ 8472 ];
140           networking.firewall.trustedInterfaces = [ "flannel.1" ];
141           networking.useDHCP = false;
142           networking.defaultGateway = "192.168.1.3";
143           networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
144             {
145               address = "192.168.1.3";
146               prefixLength = 24;
147             }
148           ];
149         };
151       agent =
152         { pkgs, ... }:
153         {
154           virtualisation.memorySize = 1024;
155           virtualisation.diskSize = 2048;
156           services.k3s = {
157             inherit tokenFile;
158             enable = true;
159             role = "agent";
160             package = k3s;
161             images = [ pauseImage ];
162             serverAddr = "https://192.168.1.3:6443";
163             extraFlags = [
164               "--pause-image test.local/pause:local"
165               "--node-ip 192.168.1.2"
166             ];
167           };
168           networking.firewall.allowedTCPPorts = [ 6443 ];
169           networking.firewall.allowedUDPPorts = [ 8472 ];
170           networking.firewall.trustedInterfaces = [ "flannel.1" ];
171           networking.useDHCP = false;
172           networking.defaultGateway = "192.168.1.2";
173           networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
174             {
175               address = "192.168.1.2";
176               prefixLength = 24;
177             }
178           ];
179         };
180     };
    # Driver script: wait for all three nodes to come up and register,
    # deploy the daemonset, then check that every node can ping every pod
    # IP and that each pod can reach every other pod over TCP port 8000.
    testScript = # python
      ''
        start_all()

        machines = [server, server2, agent]
        for m in machines:
            m.wait_for_unit("k3s")

        # wait for the agent to show up
        server.wait_until_succeeds("k3s kubectl get node agent")

        for m in machines:
            m.succeed("k3s check-config")

        server.succeed("k3s kubectl cluster-info")
        # Also wait for our service account to show up; it takes a sec
        server.wait_until_succeeds("k3s kubectl get serviceaccount default")

        # Now create a pod on each node via a daemonset and verify they can talk to each other.
        server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
        server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')

        # Get pod IPs
        pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
        pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]

        # Verify each server can ping each pod ip
        for pod_ip in pod_ips:
            server.succeed(f"ping -c 1 {pod_ip}")
            server2.succeed(f"ping -c 1 {pod_ip}")
            agent.succeed(f"ping -c 1 {pod_ip}")
            # Verify the pods can talk to each other
            for pod in pods:
                resp = server.succeed(f"k3s kubectl exec {pod} -- socat TCP:{pod_ip}:8000 -")
                assert resp.strip() == "server"
      '';
219     meta.maintainers = lib.teams.k3s.members;
220   }