5 "druid.zk.service.host" = "zk1:2181";
6 "druid.extensions.loadList" =
7 ''[ "druid-histogram", "druid-datasketches", "mysql-metadata-storage", "druid-avro-extensions", "druid-parquet-extensions", "druid-lookups-cached-global", "druid-hdfs-storage","druid-kafka-indexing-service","druid-basic-security","druid-kinesis-indexing-service"]'';
8 "druid.startup.logging.logProperties" = "true";
9 "druid.metadata.storage.connector.connectURI" = "jdbc:mysql://mysql:3306/druid";
10 "druid.metadata.storage.connector.user" = "druid";
11 "druid.metadata.storage.connector.password" = "druid";
12 "druid.request.logging.type" = "file";
13 "druid.request.logging.dir" = "/var/log/druid/requests";
14 "druid.javascript.enabled" = "true";
15 "druid.sql.enable" = "true";
16 "druid.metadata.storage.type" = "mysql";
17 "druid.storage.type" = "hdfs";
18 "druid.storage.storageDirectory" = "/druid-deepstore";
log4jConfig = ''
  <?xml version="1.0" encoding="UTF-8" ?>
  <Configuration status="WARN">
    <Appenders>
      <Console name="Console" target="SYSTEM_OUT">
        <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
      </Console>
    </Appenders>
    <Loggers>
      <!-- root logger level assumed; the original line is elided -->
      <Root level="info">
        <AppenderRef ref="Console"/>
      </Root>
    </Loggers>
  </Configuration>
'';
log4j = pkgs.writeText "log4j2.xml" log4jConfig;

# Shared Hadoop core-site settings so all nodes resolve HDFS paths
# against the namenode (the binding name `coreSite` is an assumption;
# the lines around this setting are elided in this excerpt).
coreSite = {
  "fs.defaultFS" = "hdfs://namenode:8020";
};
default = testsForPackage {
  druidPackage = pkgs.druid;
  hadoopPackage = pkgs.hadoop_3_3;
};
testsForPackage = args:
  lib.recurseIntoAttrs {
    druidCluster = testDruidCluster args;
    passthru.override = args': testsForPackage (args // args');
  };
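
# One NixOS VM per role: ZooKeeper, HDFS namenode and datanode, MySQL,
# and the five Druid services (coordinator, overlord, middleManager,
# broker, historical).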
testDruidCluster =
  { druidPackage, hadoopPackage, ... }:
  pkgs.testers.nixosTest {
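    # ... (test name and the opening of the `nodes` attrset elided)
    # ZooKeeper node; every Druid service reaches it through
    # druid.zk.service.host = "zk1:2181" from commonConfig.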
    zk1 = { ... }: {
      services.zookeeper.enable = true;
      networking.firewall.allowedTCPPorts = [ 2181 ];
    };
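
    # HDFS namenode and datanode provide the deep-storage filesystem; only
    # the package pin is shown here, the rest of each services.hadoop
    # block is elided.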
    # ... (namenode)
    package = hadoopPackage;
    # ... (datanode)
    package = hadoopPackage;
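
    # The middleManager runs the ingestion tasks, so it gets extra VM
    # memory and the Hadoop client config on its classpath for writing
    # segments to HDFS.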
    # ... (mm node)
    virtualisation.memorySize = 1024;
    # ...
    inherit commonConfig log4j;
    package = druidPackage;
    extraClassPaths = [ "/etc/hadoop-conf" ];
99 "druid.indexer.task.baseTaskDir" = "/tmp/druid/persistent/task";
100 "druid.worker.capacity" = 1;
101 "druid.indexer.logs.type" = "file";
102 "druid.indexer.logs.directory" = "/var/log/druid/indexer";
103 "druid.indexer.runner.startPort" = 8100;
104 "druid.indexer.runner.endPort" = 8101;
    # ...
    gatewayRole.enable = true;
    package = hadoopPackage;
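
    # The overlord dispatches tasks to the middleManager remotely and
    # keeps task state in the metadata store.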
    # ... (overlord node)
    inherit commonConfig log4j;
    package = druidPackage;
    extraClassPaths = [ "/etc/hadoop-conf" ];
    # ...
    "druid.indexer.runner.type" = "remote";
    "druid.indexer.storage.type" = "metadata";
    # ...
    gatewayRole.enable = true;
    package = hadoopPackage;
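
    # The broker answers queries on port 8082; connection, thread, and
    # buffer settings are scaled down to fit the test VM.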
    # ... (broker node)
    package = druidPackage;
    inherit commonConfig log4j;
    extraClassPaths = [ "/etc/hadoop-conf" ];
    # ...
    "druid.plaintextPort" = 8082;
    "druid.broker.http.numConnections" = "2";
    "druid.server.http.numThreads" = "2";
    "druid.processing.buffer.sizeBytes" = "100";
    "druid.processing.numThreads" = "1";
    "druid.processing.numMergeBuffers" = "1";
    "druid.broker.cache.unCacheable" = ''["groupBy"]'';
    "druid.lookup.snapshotWorkingDir" = "/opt/broker/lookups";
    # ...
    gatewayRole.enable = true;
    package = hadoopPackage;
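
    # The historical serves segments pulled down from deep storage; the
    # maxSize values cap how many bytes of segment cache it may keep.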
    # ... (historical node)
    package = druidPackage;
    inherit commonConfig log4j;
    extraClassPaths = [ "/etc/hadoop-conf" ];
    # ...
    "maxSize" = 200000000;
    "druid.lookup.snapshotWorkingDir" = "/opt/historical/lookups";
    # ... (apparently two segment-cache locations, 100 MB each; their
    # paths are elided in this excerpt)
    "maxSize" = "100000000";
    # ...
    "maxSize" = "100000000";
    # ...
    gatewayRole.enable = true;
    package = hadoopPackage;
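
    # The coordinator (port 9091) assigns segments to the historical; the
    # aggressive PT10S periods replace the minute-scale defaults so the
    # test converges quickly.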
    # ... (coordinator node)
    package = druidPackage;
    inherit commonConfig log4j;
    extraClassPaths = [ "/etc/hadoop-conf" ];
    # ...
    "druid.plaintextPort" = 9091;
    "druid.service" = "coordinator";
    "druid.coordinator.startDelay" = "PT10S";
    "druid.coordinator.period" = "PT10S";
    "druid.manager.config.pollDuration" = "PT10S";
    "druid.manager.segments.pollDuration" = "PT10S";
    "druid.manager.rules.pollDuration" = "PT10S";
    # ...
    gatewayRole.enable = true;
    package = hadoopPackage;
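
    # MariaDB backs the metadata store; the init script creates the
    # `druid` user that commonConfig's JDBC settings expect.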
    # ... (mysql node, services.mysql)
    package = pkgs.mariadb;
    initialDatabases = [ { name = "druid"; } ];
    initialScript = pkgs.writeText "mysql-init.sql" ''
      CREATE USER 'druid'@'%' IDENTIFIED BY 'druid';
      GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%';
    '';
    # ...
    networking.firewall.allowedTCPPorts = [ 3306 ];
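
  # ... (remaining node plumbing elided)
  # The script brings HDFS up first, trims the quickstart sample data,
  # creates the deep-storage root, then drives an ingestion task and a
  # query through the cluster.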
  testScript = ''
    namenode.wait_for_unit("hdfs-namenode")
    namenode.wait_for_unit("network.target")
    namenode.wait_for_open_port(8020)
    namenode.succeed("ss -tulpne | systemd-cat")
    namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
    namenode.wait_for_open_port(9870)
    datanode.wait_for_unit("hdfs-datanode")
    datanode.wait_for_unit("network.target")

    # Shrink the sample dataset to its first 10 rows so ingestion stays
    # fast; "|| true" tolerates zcat's SIGPIPE failure when head closes
    # the pipe early.
    mm.succeed("mkdir -p /quickstart/")
    mm.succeed("cp -r ${pkgs.druid}/quickstart/* /quickstart/")
    mm.succeed("touch /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
    mm.succeed("zcat /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz | head -n 10 > /quickstart/tutorial/wikiticker-2015-09-12-sampled.json || true")
    mm.succeed("rm /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz && gzip /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")

    # Create the deep-storage root in HDFS and hand it to the druid user.
    namenode.succeed("sudo -u hdfs hdfs dfs -mkdir /druid-deepstore")
    namenode.succeed("HADOOP_USER_NAME=druid sudo -u hdfs hdfs dfs -chown druid:hadoop /druid-deepstore")
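
    # Once every service is up, submit the quickstart wikipedia ingestion
    # task, wait for the datasource to register, and run the sample
    # top-pages query against the broker.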
    coordinator.wait_for_unit("druid-coordinator")
    overlord.wait_for_unit("druid-overlord")
    historical.wait_for_unit("druid-historical")
    mm.wait_for_unit("druid-middleManager")

    coordinator.wait_for_open_port(9091)
    overlord.wait_for_open_port(8090)
    historical.wait_for_open_port(8083)
    mm.wait_for_open_port(8091)

    broker.wait_for_unit("network.target")
    broker.wait_for_open_port(8082)

    broker.succeed("curl -X POST -H 'Content-Type: application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-index.json http://coordinator:9091/druid/indexer/v1/task")
    broker.wait_until_succeeds("curl http://coordinator:9091/druid/coordinator/v1/metadata/datasources | grep 'wikipedia'")

    # The broker must see the datasource before it can answer queries.
    broker.wait_until_succeeds("curl http://localhost:8082/druid/v2/datasources/ | grep wikipedia")
    broker.succeed("curl -X POST -H 'Content-Type: application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-top-pages.json http://localhost:8082/druid/v2/")