# parinfer-rust: useFetchCargoVendor
# [NixPkgs.git] / nixos / tests / druid / default.nix
# blob 1c645785c11d6248d406e8a49cf51b5f91b56623
# NixOS integration test for an Apache Druid cluster using HDFS deep
# storage, MySQL/MariaDB metadata storage and ZooKeeper coordination.
{ pkgs, ... }:
let
  inherit (pkgs) lib;
  # Druid runtime properties shared by every Druid service in this test
  # (passed to services.druid.commonConfig on each Druid node below).
  commonConfig = {
    # ZooKeeper node used for coordination/service discovery (node `zk1` below).
    "druid.zk.service.host" = "zk1:2181";
    # Extensions loaded at startup; kept as a single verbatim JSON-style list.
    "druid.extensions.loadList" =
      ''[ "druid-histogram", "druid-datasketches",  "mysql-metadata-storage", "druid-avro-extensions", "druid-parquet-extensions", "druid-lookups-cached-global", "druid-hdfs-storage","druid-kafka-indexing-service","druid-basic-security","druid-kinesis-indexing-service"]'';
    "druid.startup.logging.logProperties" = "true";
    # Metadata store: the MariaDB instance on the `mysql` node (user/password
    # created by its initialScript below).
    "druid.metadata.storage.connector.connectURI" = "jdbc:mysql://mysql:3306/druid";
    "druid.metadata.storage.connector.user" = "druid";
    "druid.metadata.storage.connector.password" = "druid";
    "druid.request.logging.type" = "file";
    "druid.request.logging.dir" = "/var/log/druid/requests";
    "druid.javascript.enabled" = "true";
    "druid.sql.enable" = "true";
    "druid.metadata.storage.type" = "mysql";
    # Deep storage lives on HDFS (namenode/datanode nodes below).
    "druid.storage.type" = "hdfs";
    "druid.storage.storageDirectory" = "/druid-deepstore";
  };
  # Minimal log4j2 configuration: single console appender with the root
  # logger at ERROR, to keep the test VM logs quiet.
  log4jConfig = ''
    <?xml version="1.0" encoding="UTF-8" ?>
    <Configuration status="WARN">
     <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="error">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>
  '';
  # Materialize the config as a store file; handed to services.druid.log4j
  # on every Druid node.
  log4j = pkgs.writeText "log4j2.xml" log4jConfig;
  # Hadoop core-site.xml settings shared by all HDFS-aware nodes: point
  # every client at the namenode VM.
  coreSite = {
    "fs.defaultFS" = "hdfs://namenode:8020";
  };
39   tests = {
40     default = testsForPackage {
41       druidPackage = pkgs.druid;
42       hadoopPackage = pkgs.hadoop_3_3;
43     };
44   };
45   testsForPackage =
46     args:
47     lib.recurseIntoAttrs {
48       druidCluster = testDruidCluster args;
49       passthru.override = args': testsForPackage (args // args');
50     };
  # The actual NixOS VM test: a multi-node Druid cluster (coordinator,
  # overlord, broker, historical, middleManager) with HDFS deep storage,
  # a MariaDB metadata store and a single ZooKeeper node. The testScript
  # ingests the bundled Wikipedia quickstart sample and queries it back
  # through the broker.
  testDruidCluster =
    { druidPackage, hadoopPackage, ... }:
    pkgs.testers.nixosTest {
      name = "druid-hdfs";
      nodes = {
        # Single ZooKeeper node for Druid coordination (port 2181, cf.
        # druid.zk.service.host in commonConfig).
        zk1 =
          { ... }:
          {
            services.zookeeper.enable = true;
            networking.firewall.allowedTCPPorts = [ 2181 ];
          };
        # HDFS namenode; formatOnInit bootstraps the filesystem on first boot.
        namenode =
          { ... }:
          {
            services.hadoop = {
              package = hadoopPackage;
              hdfs = {
                namenode = {
                  enable = true;
                  openFirewall = true;
                  formatOnInit = true;
                };
              };
              inherit coreSite;
            };
          };
        # HDFS datanode providing the storage behind /druid-deepstore.
        datanode =
          { ... }:
          {
            services.hadoop = {
              package = hadoopPackage;
              hdfs.datanode = {
                enable = true;
                openFirewall = true;
              };
              inherit coreSite;
            };
          };
        # Druid middleManager: runs the indexing tasks (peon ports 8100-8101).
        mm =
          { ... }:
          {
            virtualisation.memorySize = 1024;
            services.druid = {
              inherit commonConfig log4j;
              package = druidPackage;
              # Hadoop client configuration so Druid can reach HDFS.
              extraClassPaths = [ "/etc/hadoop-conf" ];
              middleManager = {
                config = {
                  "druid.indexer.task.baseTaskDir" = "/tmp/druid/persistent/task";
                  "druid.worker.capacity" = 1;
                  "druid.indexer.logs.type" = "file";
                  "druid.indexer.logs.directory" = "/var/log/druid/indexer";
                  "druid.indexer.runner.startPort" = 8100;
                  "druid.indexer.runner.endPort" = 8101;
                };
                enable = true;
                openFirewall = true;
              };
            };
            # gatewayRole installs the Hadoop client config consumed via
            # extraClassPaths above.
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
        # Druid overlord: accepts indexing tasks and dispatches them to the
        # middleManager over ZooKeeper.
        overlord =
          { ... }:
          {
            services.druid = {
              inherit commonConfig log4j;
              package = druidPackage;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              overlord = {
                config = {
                  "druid.indexer.runner.type" = "remote";
                  "druid.indexer.storage.type" = "metadata";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
        # Druid broker: the query endpoint (port 8082) used by the testScript.
        broker =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              broker = {
                config = {
                  "druid.plaintextPort" = 8082;
                  "druid.broker.http.numConnections" = "2";
                  "druid.server.http.numThreads" = "2";
                  "druid.processing.buffer.sizeBytes" = "100";
                  "druid.processing.numThreads" = "1";
                  "druid.processing.numMergeBuffers" = "1";
                  "druid.broker.cache.unCacheable" = ''["groupBy"]'';
                  "druid.lookup.snapshotWorkingDir" = "/opt/broker/lookups";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };

          };
        # Druid historical: serves ingested segments from two local
        # segment-cache locations.
        historical =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              historical = {
                config = {
                  "maxSize" = 200000000;
                  "druid.lookup.snapshotWorkingDir" = "/opt/historical/lookups";
                };
                segmentLocations = [
                  {
                    "path" = "/tmp/1";
                    "maxSize" = "100000000";
                  }
                  {
                    "path" = "/tmp/2";
                    "maxSize" = "100000000";
                  }
                ];
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };

          };
        # Druid coordinator (port 9091); short poll/start delays to speed up
        # segment handoff inside the test.
        coordinator =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              coordinator = {
                config = {
                  "druid.plaintextPort" = 9091;
                  "druid.service" = "coordinator";
                  "druid.coordinator.startDelay" = "PT10S";
                  "druid.coordinator.period" = "PT10S";
                  "druid.manager.config.pollDuration" = "PT10S";
                  "druid.manager.segments.pollDuration" = "PT10S";
                  "druid.manager.rules.pollDuration" = "PT10S";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };

          };

        # MariaDB metadata store; initialScript creates the `druid` user
        # referenced by commonConfig's connector settings.
        mysql =
          { ... }:
          {
            services.mysql = {
              enable = true;
              package = pkgs.mariadb;
              initialDatabases = [ { name = "druid"; } ];
              initialScript = pkgs.writeText "mysql-init.sql" ''
                CREATE USER 'druid'@'%' IDENTIFIED BY 'druid';
                GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%';
              '';
            };
            networking.firewall.allowedTCPPorts = [ 3306 ];
          };

      };
      # Python test driver: bring up HDFS, stage a truncated quickstart
      # sample on the middleManager, create the deep-storage directory,
      # then submit an ingestion task and query the result via the broker.
      testScript = ''
        start_all()
        namenode.wait_for_unit("hdfs-namenode")
        namenode.wait_for_unit("network.target")
        namenode.wait_for_open_port(8020)
        namenode.succeed("ss -tulpne | systemd-cat")
        namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
        namenode.wait_for_open_port(9870)
        datanode.wait_for_unit("hdfs-datanode")
        datanode.wait_for_unit("network.target")

        mm.succeed("mkdir -p /quickstart/")
        mm.succeed("cp -r ${pkgs.druid}/quickstart/* /quickstart/")
        mm.succeed("touch /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
        mm.succeed("zcat /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz | head -n 10 > /quickstart/tutorial/wikiticker-2015-09-12-sampled.json || true")
        mm.succeed("rm /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz && gzip /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")

        namenode.succeed("sudo -u hdfs hdfs dfs -mkdir /druid-deepstore")
        namenode.succeed("HADOOP_USER_NAME=druid sudo -u hdfs hdfs dfs -chown druid:hadoop /druid-deepstore")

        ### Druid tests
        coordinator.wait_for_unit("druid-coordinator")
        overlord.wait_for_unit("druid-overlord")
        historical.wait_for_unit("druid-historical")
        mm.wait_for_unit("druid-middleManager")

        coordinator.wait_for_open_port(9091)
        overlord.wait_for_open_port(8090)
        historical.wait_for_open_port(8083)
        mm.wait_for_open_port(8091)

        broker.wait_for_unit("network.target")
        broker.wait_for_open_port(8082)

        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-index.json http://coordinator:9091/druid/indexer/v1/task")
        broker.wait_until_succeeds("curl http://coordinator:9091/druid/coordinator/v1/metadata/datasources | grep  'wikipedia'")

        broker.wait_until_succeeds("curl http://localhost:8082/druid/v2/datasources/ | grep wikipedia")
        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-top-pages.json http://localhost:8082/druid/v2/")

      '';

    };
# Close the `let` binding block; the file evaluates to the `tests` attrset.
# (The `in` keyword is required here — it is missing in the text as seen,
# leaving the let-expression unterminated.)
in
tests