import ./make-test-python.nix ({pkgs, lib, ...}:
clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
generateCephConfig = { daemonConfig }: {
monHost = cfg.monA.ip;
monInitialMembers = cfg.monA.name;
generateHost = { pkgs, cephConfig, networkConfig, ... }: {
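# Each generated host gets one empty 20480 MiB (20 GiB) disk image, which shows
# up in the VM as /dev/vdb; the OSD hosts later mount it as their data directory.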
emptyDiskImages = [ 20480 ];
networking = networkConfig;
environment.systemPackages = with pkgs; [
boot.kernelModules = [ "xfs" ];
services.ceph = cephConfig;
dhcpcd.enable = false;
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
{ address = cfg.monA.ip; prefixLength = 24; }
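# Ceph monitors listen on 3300 (msgr2) and the legacy 6789 (msgr1) port; OSD,
# mgr and MDS daemons bind to ports in the 6800-7300 range by default.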
allowedTCPPorts = [ 6789 3300 ];
allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
cephConfigMonA = generateCephConfig { daemonConfig = {
daemons = [ cfg.monA.name ];
daemons = [ cfg.monA.name ];
dhcpcd.enable = false;
interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
{ address = osd.ip; prefixLength = 24; }
allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
cephConfigOsd = osd: generateCephConfig { daemonConfig = {
daemons = [ osd.name ];
# The following deployment is based on the manual deployment described here:
# https://docs.ceph.com/docs/master/install/manual-deployment/
# For other ways to deploy a Ceph cluster, see the documentation at
# https://docs.ceph.com/docs/master/
testscript = { ... }: ''
monA.wait_for_unit("network.target")
osd0.wait_for_unit("network.target")
osd1.wait_for_unit("network.target")
osd2.wait_for_unit("network.target")
# Bootstrap ceph-mon daemon
"sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
"sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
"sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
"monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
"sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
116 "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
117 "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
118 "systemctl start ceph-mon-${cfg.monA.name}",
120 monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
121 monA.succeed("ceph mon enable-msgr2")
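# Disallowing insecure global_id reclaim avoids the
# AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED health warning, which would otherwise
# keep the cluster from reaching the HEALTH_OK state checked further down.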
monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
# Can't check ceph status until a mon is up
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
# Start the ceph-mgr daemon; it has no dependencies and needs hardly any setup
129 "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
130 "systemctl start ceph-mgr-${cfg.monA.name}",
monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
# Send the admin keyring to the OSD machines
monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
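# Prepare each OSD: mount the empty disk on the OSD data directory, create the
# OSD keyring, and register the OSD with the cluster via 'ceph osd new'.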
145 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
146 "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
147 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
148 'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
152 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
153 "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
154 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
155 'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
159 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
160 "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
161 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
162 'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
# Initialize the OSDs with regular filestore
"ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
"chown -R ceph:ceph /var/lib/ceph/osd",
"systemctl start ceph-osd-${cfg.osd0.name}",
"ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
"chown -R ceph:ceph /var/lib/ceph/osd",
"systemctl start ceph-osd-${cfg.osd1.name}",
"ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
"chown -R ceph:ceph /var/lib/ceph/osd",
"systemctl start ceph-osd-${cfg.osd2.name}",
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
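# Exercise basic pool operations: create a pool with 32 placement groups, tag
# it with an application, rename and resize it, and finally check that the old
# name is gone; deleting a pool is normally refused unless mon_allow_pool_delete
# is enabled.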
186 "ceph osd pool create multi-node-test 32 32",
187 "ceph osd pool ls | grep 'multi-node-test'",
# We need to enable an application on the pool; otherwise it will
# stay unhealthy in state POOL_APP_NOT_ENABLED.
# Creating a CephFS would do this automatically, but we haven't done that here.
# See: https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
# We use the custom application name "nixos-test" for this.
194 "ceph osd pool application enable multi-node-test nixos-test",
196 "ceph osd pool rename multi-node-test multi-node-other-test",
197 "ceph osd pool ls | grep 'multi-node-other-test'",
199 monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
200 monA.succeed("ceph osd pool set multi-node-other-test size 2")
201 monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
202 monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
204 "ceph osd pool ls | grep 'multi-node-test'",
205 "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
# Shut down Ceph on all machines in a very impolite way
# Ensure the cluster comes back up again
monA.succeed("ceph -s | grep 'mon: 1 daemons'")
monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
name = "basic-multi-node-ceph-cluster";
meta = with pkgs.lib.maintainers; {
maintainers = [ lejonet ];
monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd0; networkConfig = networkOsd cfg.osd0; };
osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd1; networkConfig = networkOsd cfg.osd1; };
osd2 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd2; networkConfig = networkOsd cfg.osd2; };
testScript = testscript;