import ./make-test-python.nix ({pkgs, lib, ...}:

let
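  # Fixed identity for the test cluster: a cluster fsid plus a static cephx
  # key and uuid for each of the three OSDs. These are throwaway test
  # values, not real secrets.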
  cfg = {
    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
    monA = {
      name = "a";
      ip = "192.168.1.1";
    };
    osd0 = {
      name = "0";
      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
    };
    osd1 = {
      name = "1";
      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
    };
    osd2 = {
      name = "2";
      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
    };
  };

  # Shared ceph.conf settings: every daemon needs the cluster fsid and the
  # address of the initial monitor.
  generateCephConfig = { daemonConfig }: {
    enable = true;
    global = {
      fsid = cfg.clusterId;
      monHost = cfg.monA.ip;
      monInitialMembers = cfg.monA.name;
    };
  } // daemonConfig;
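
  # Builds the NixOS machine configuration for a host: three blank disks
  # for the OSDs, the ceph packages and service configuration, and the
  # test network.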
  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
    virtualisation = {
      # Three empty 20 GiB disks, exposed to the VM as /dev/vdb, /dev/vdc
      # and /dev/vdd; one per OSD.
      emptyDiskImages = [ 20480 20480 20480 ];
      vlans = [ 1 ];
    };

    networking = networkConfig;

    environment.systemPackages = with pkgs; [
      bash
      sudo
      ceph
      xfsprogs
    ];

    boot.kernelModules = [ "xfs" ];

    services.ceph = cephConfig;
  };

  networkMonA = {
    dhcpcd.enable = false;
    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
      { address = cfg.monA.ip; prefixLength = 24; }
    ];
  };
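
  # Single-node layout: the one machine runs the mon, the mgr and all
  # three OSD daemons.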
  cephConfigMonA = generateCephConfig { daemonConfig = {
    mon = {
      enable = true;
      daemons = [ cfg.monA.name ];
    };
    mgr = {
      enable = true;
      daemons = [ cfg.monA.name ];
    };
    osd = {
      enable = true;
      daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
    };
  }; };

  # The following deployment is based on the manual deployment described at
  # https://docs.ceph.com/docs/master/install/manual-deployment/
  # For other ways to deploy a ceph cluster, see the documentation at
  # https://docs.ceph.com/docs/master/
  testscript = { ... }: ''
    start_all()

83 monA.wait_for_unit("network.target")
    # Bootstrap the ceph-mon daemon
    monA.succeed(
87 "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
88 "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
89 "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
90 "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
91 "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
92 "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
93 "systemctl start ceph-mon-${cfg.monA.name}",
95 monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
96 monA.succeed("ceph mon enable-msgr2")
97 monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
    # The cluster status can't be checked until at least one mon is up
    monA.succeed("ceph -s | grep 'mon: 1 daemons'")

    # Start the ceph-mgr daemon after copying in its keyring
    monA.succeed(
104 "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
105 "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
106 "systemctl start ceph-mgr-${cfg.monA.name}",
108 monA.wait_for_unit("ceph-mgr-a")
    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")

    # Bootstrap the OSDs: prepare a bluestore data directory, keyring and
    # cluster registration for each of the three OSDs
    monA.succeed(
114 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
115 "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
116 "ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
117 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
118 "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
119 "ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
120 "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
121 "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
122 "ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
123 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
124 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
125 "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
126 'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
127 'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
128 'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
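
    # Each "ceph osd new" call above allocated an OSD id (0, 1, 2) for the
    # given uuid and registered its cephx key with the cluster.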
    # Initialize the OSDs with bluestore, then hand the data directories to
    # the ceph user and start the daemons
    monA.succeed(
133 "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
134 "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
135 "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
136 "chown -R ceph:ceph /var/lib/ceph/osd",
137 "systemctl start ceph-osd-${cfg.osd0.name}",
138 "systemctl start ceph-osd-${cfg.osd1.name}",
139 "systemctl start ceph-osd-${cfg.osd2.name}",
141 monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
142 monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
143 monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
146 "ceph osd pool create single-node-test 32 32",
147 "ceph osd pool ls | grep 'single-node-test'",
        # We need to enable an application on the pool, otherwise it will
        # stay unhealthy in state POOL_APP_NOT_ENABLED.
        # Creating a CephFS would do this automatically, but we haven't done
        # that here. See:
        # https://docs.ceph.com/en/reef/rados/operations/pools/#associating-a-pool-with-an-application
        # We use the custom application name "nixos-test" for this.
        "ceph osd pool application enable single-node-test nixos-test",

156 "ceph osd pool rename single-node-test single-node-other-test",
157 "ceph osd pool ls | grep 'single-node-other-test'",
159 monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
161 "ceph osd getcrushmap -o crush",
162 "crushtool -d crush -o decrushed",
163 "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
164 "crushtool -c modcrush -o recrushed",
165 "ceph osd setcrushmap -i recrushed",
166 "ceph osd pool set single-node-other-test size 2",
168 monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
169 monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
171 "ceph osd pool ls | grep 'multi-node-test'",
172 "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
    # Shut down ceph by stopping ceph.target
    monA.succeed("systemctl stop ceph.target")

    # Start it back up
179 monA.succeed("systemctl start ceph.target")
180 monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
181 monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
182 monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
183 monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
184 monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
    # Ensure the cluster comes back up again
    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
  '';
in {
  name = "basic-single-node-ceph-cluster-bluestore";
  meta = with pkgs.lib.maintainers; {
    maintainers = [ lukegb ];
  };

  nodes = {
    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
  };

  testScript = testscript;
})
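
# To run this test (assuming it is wired into nixos/tests/all-tests.nix as
# ceph-single-node-bluestore, as the name above suggests):
#   nix-build -A nixosTests.ceph-single-node-bluestore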