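# NixOS VM test for an Apache Druid cluster backed by HDFS deep storage:
# ZooKeeper, a MariaDB metadata store, an HDFS namenode/datanode pair, and the
# Druid coordinator, overlord, broker, historical and middleManager services
# each run on their own node.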
{ pkgs, ... }:
let
  inherit (pkgs) lib;
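  # Runtime properties shared by every Druid service: ZooKeeper address,
  # MySQL metadata store and HDFS deep storage.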
  commonConfig = {
    "druid.zk.service.host" = "zk1:2181";
    "druid.extensions.loadList" = ''[ "druid-histogram", "druid-datasketches", "mysql-metadata-storage", "druid-avro-extensions", "druid-parquet-extensions", "druid-lookups-cached-global", "druid-hdfs-storage", "druid-kafka-indexing-service", "druid-basic-security", "druid-kinesis-indexing-service" ]'';
    "druid.startup.logging.logProperties" = "true";
    "druid.metadata.storage.connector.connectURI" = "jdbc:mysql://mysql:3306/druid";
    "druid.metadata.storage.connector.user" = "druid";
    "druid.metadata.storage.connector.password" = "druid";
    "druid.request.logging.type" = "file";
    "druid.request.logging.dir" = "/var/log/druid/requests";
    "druid.javascript.enabled" = "true";
    "druid.sql.enable" = "true";
    "druid.metadata.storage.type" = "mysql";
    "druid.storage.type" = "hdfs";
    "druid.storage.storageDirectory" = "/druid-deepstore";
  };
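  # Minimal log4j2 configuration: log errors to the console.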
  log4jConfig = ''
    <?xml version="1.0" encoding="UTF-8" ?>
    <Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="error">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>
  '';
  log4j = pkgs.writeText "log4j2.xml" log4jConfig;
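  # Hadoop core-site.xml shared by all nodes, pointing HDFS clients at the namenode.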
  coreSite = {
    "fs.defaultFS" = "hdfs://namenode:8020";
  };
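  # Package combinations under test; other Druid/Hadoop pairings can be
  # exercised via the passthru.override attribute defined below.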
  tests = {
    default = testsForPackage {
      druidPackage = pkgs.druid;
      hadoopPackage = pkgs.hadoop_3_2;
    };
  };
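  # Build the test set for a given Druid/Hadoop pair, keeping an override hook
  # so the packages can be swapped without redefining the test.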
  testsForPackage =
    args:
    lib.recurseIntoAttrs {
      druidCluster = testDruidCluster args;
      passthru.override = args': testsForPackage (args // args');
    };
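  # The actual cluster test: one VM per Druid service plus ZooKeeper, MariaDB
  # and an HDFS namenode/datanode pair for deep storage.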
  testDruidCluster =
    { druidPackage, hadoopPackage, ... }:
    pkgs.testers.nixosTest {
      name = "druid-hdfs";
      nodes = {
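        # ZooKeeper instance used by all Druid services for coordination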
        zk1 =
          { ... }:
          {
            services.zookeeper.enable = true;
            networking.firewall.allowedTCPPorts = [ 2181 ];
          };
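        # HDFS namenode and datanode backing Druid's deep storage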
        namenode =
          { ... }:
          {
            services.hadoop = {
              package = hadoopPackage;
              hdfs = {
                namenode = {
                  enable = true;
                  openFirewall = true;
                  formatOnInit = true;
                };
              };
              inherit coreSite;
            };
          };
        datanode =
          { ... }:
          {
            services.hadoop = {
              package = hadoopPackage;
              hdfs.datanode = {
                enable = true;
                openFirewall = true;
              };
              inherit coreSite;
            };
          };
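        # Druid middleManager: runs the ingestion (indexing) tasks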
        mm =
          { ... }:
          {
            virtualisation.memorySize = 1024;
            services.druid = {
              inherit commonConfig log4j;
              package = druidPackage;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              middleManager = {
                config = {
                  "druid.indexer.task.baseTaskDir" = "/tmp/druid/persistent/task";
                  "druid.worker.capacity" = 1;
                  "druid.indexer.logs.type" = "file";
                  "druid.indexer.logs.directory" = "/var/log/druid/indexer";
                  "druid.indexer.runner.startPort" = 8100;
                  "druid.indexer.runner.endPort" = 8101;
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
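        # Druid overlord: accepts indexing tasks and dispatches them to the middleManager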
        overlord =
          { ... }:
          {
            services.druid = {
              inherit commonConfig log4j;
              package = druidPackage;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              overlord = {
                config = {
                  "druid.indexer.runner.type" = "remote";
                  "druid.indexer.storage.type" = "metadata";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
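        # Druid broker: query entry point used by the test script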
        broker =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              broker = {
                config = {
                  "druid.plaintextPort" = 8082;
                  "druid.broker.http.numConnections" = "2";
                  "druid.server.http.numThreads" = "2";
                  "druid.processing.buffer.sizeBytes" = "100";
                  "druid.processing.numThreads" = "1";
                  "druid.processing.numMergeBuffers" = "1";
                  "druid.broker.cache.unCacheable" = ''["groupBy"]'';
                  "druid.lookup.snapshotWorkingDir" = "/opt/broker/lookups";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
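        # Druid historical: serves segments loaded from deep storage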
        historical =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              historical = {
                config = {
                  "maxSize" = 200000000;
                  "druid.lookup.snapshotWorkingDir" = "/opt/historical/lookups";
                };
                segmentLocations = [
                  {
                    "path" = "/tmp/1";
                    "maxSize" = "100000000";
                  }
                  {
                    "path" = "/tmp/2";
                    "maxSize" = "100000000";
                  }
                ];
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
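        # Druid coordinator: manages segment assignment and balancing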
        coordinator =
          { ... }:
          {
            services.druid = {
              package = druidPackage;
              inherit commonConfig log4j;
              extraClassPaths = [ "/etc/hadoop-conf" ];
              coordinator = {
                config = {
                  "druid.plaintextPort" = 9091;
                  "druid.service" = "coordinator";
                  "druid.coordinator.startDelay" = "PT10S";
                  "druid.coordinator.period" = "PT10S";
                  "druid.manager.config.pollDuration" = "PT10S";
                  "druid.manager.segments.pollDuration" = "PT10S";
                  "druid.manager.rules.pollDuration" = "PT10S";
                };
                enable = true;
                openFirewall = true;
              };
            };
            services.hadoop = {
              gatewayRole.enable = true;
              package = hadoopPackage;
              inherit coreSite;
            };
          };
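        # MariaDB acting as the Druid metadata store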
        mysql =
          { ... }:
          {
            services.mysql = {
              enable = true;
              package = pkgs.mariadb;
              initialDatabases = [ { name = "druid"; } ];
              initialScript = pkgs.writeText "mysql-init.sql" ''
                CREATE USER 'druid'@'%' IDENTIFIED BY 'druid';
                GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%';
              '';
            };
            networking.firewall.allowedTCPPorts = [ 3306 ];
          };
      };
      testScript = ''
        start_all()
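
        ### HDFS tests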
        namenode.wait_for_unit("hdfs-namenode")
        namenode.wait_for_unit("network.target")
        namenode.wait_for_open_port(8020)
        namenode.succeed("ss -tulpne | systemd-cat")
        namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
        namenode.wait_for_open_port(9870)
        datanode.wait_for_unit("hdfs-datanode")
        datanode.wait_for_unit("network.target")
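
        # Trim the bundled quickstart sample dataset so the ingestion task stays small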
        mm.succeed("mkdir -p /quickstart/")
        mm.succeed("cp -r ${druidPackage}/quickstart/* /quickstart/")
        mm.succeed("touch /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
        mm.succeed("zcat /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz | head -n 10 > /quickstart/tutorial/wikiticker-2015-09-12-sampled.json || true")
        mm.succeed("rm /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz && gzip /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
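
        # Create the deep-storage directory on HDFS and hand it over to the druid user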
        namenode.succeed("sudo -u hdfs hdfs dfs -mkdir /druid-deepstore")
        namenode.succeed("HADOOP_USER_NAME=druid sudo -u hdfs hdfs dfs -chown druid:hadoop /druid-deepstore")

        ### Druid tests
        coordinator.wait_for_unit("druid-coordinator")
        overlord.wait_for_unit("druid-overlord")
        historical.wait_for_unit("druid-historical")
        mm.wait_for_unit("druid-middleManager")

        coordinator.wait_for_open_port(9091)
        overlord.wait_for_open_port(8090)
        historical.wait_for_open_port(8083)
        mm.wait_for_open_port(8091)

        broker.wait_for_unit("network.target")
        broker.wait_for_open_port(8082)
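
        # Submit the quickstart wikipedia indexing task, wait for the datasource to
        # be registered, then query it through the broker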
        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${druidPackage}/quickstart/tutorial/wikipedia-index.json http://coordinator:9091/druid/indexer/v1/task")
        broker.wait_until_succeeds("curl http://coordinator:9091/druid/coordinator/v1/metadata/datasources | grep 'wikipedia'")

        broker.wait_until_succeeds("curl http://localhost:8082/druid/v2/datasources/ | grep wikipedia")
        broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${druidPackage}/quickstart/tutorial/wikipedia-top-pages.json http://localhost:8082/druid/v2/")
      '';
    };
in
tests