// Extracted from hbase.git: hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
// (gitweb view of commit "HBASE-26921 Rewrite the counting cells part in TestMultiVersions (#4316)",
//  blob d1f8cc08b2699a6b990941baa11e86529dee419c)
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.PerformanceEvaluation.RandomReadTest;
import org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.gson.Gson;
59 @Category({MiscTests.class, SmallTests.class})
60 public class TestPerformanceEvaluation {
61 @ClassRule
62 public static final HBaseClassTestRule CLASS_RULE =
63 HBaseClassTestRule.forClass(TestPerformanceEvaluation.class);
65 private static final HBaseTestingUtil HTU = new HBaseTestingUtil();
67 @Test
68 public void testDefaultInMemoryCompaction() {
69 PerformanceEvaluation.TestOptions defaultOpts =
70 new PerformanceEvaluation.TestOptions();
71 assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT,
72 defaultOpts.getInMemoryCompaction().toString());
73 TableDescriptor tableDescriptor = PerformanceEvaluation.getTableDescriptor(defaultOpts);
74 for (ColumnFamilyDescriptor familyDescriptor : tableDescriptor.getColumnFamilies()) {
75 assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT,
76 familyDescriptor.getInMemoryCompaction().toString());
80 @Test
81 public void testSerialization() {
82 PerformanceEvaluation.TestOptions options = new PerformanceEvaluation.TestOptions();
83 assertFalse(options.isAutoFlush());
84 options.setAutoFlush(true);
85 Gson gson = GsonUtil.createGson().create();
86 String optionsString = gson.toJson(options);
87 PerformanceEvaluation.TestOptions optionsDeserialized =
88 gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class);
89 assertTrue(optionsDeserialized.isAutoFlush());
92 /**
93 * Exercise the mr spec writing. Simple assertions to make sure it is basically working.
95 @Test
96 public void testWriteInputFile() throws IOException {
97 TestOptions opts = new PerformanceEvaluation.TestOptions();
98 final int clients = 10;
99 opts.setNumClientThreads(clients);
100 opts.setPerClientRunRows(10);
101 Path dir =
102 PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir());
103 FileSystem fs = FileSystem.get(HTU.getConfiguration());
104 Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME);
105 long len = fs.getFileStatus(p).getLen();
106 assertTrue(len > 0);
107 byte[] content = new byte[(int) len];
108 try (FSDataInputStream dis = fs.open(p)) {
109 dis.readFully(content);
110 BufferedReader br = new BufferedReader(
111 new InputStreamReader(new ByteArrayInputStream(content), StandardCharsets.UTF_8));
112 int count = 0;
113 while (br.readLine() != null) {
114 count++;
116 assertEquals(clients, count);
120 @Test
121 public void testSizeCalculation() {
122 TestOptions opts = new PerformanceEvaluation.TestOptions();
123 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
124 int rows = opts.getPerClientRunRows();
125 // Default row count
126 final int defaultPerClientRunRows = 1024 * 1024;
127 assertEquals(defaultPerClientRunRows, rows);
128 // If size is 2G, then twice the row count.
129 opts.setSize(2.0f);
130 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
131 assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
132 // If two clients, then they get half the rows each.
133 opts.setNumClientThreads(2);
134 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
135 assertEquals(defaultPerClientRunRows, opts.getPerClientRunRows());
136 // What if valueSize is 'random'? Then half of the valueSize so twice the rows.
137 opts.valueRandom = true;
138 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
139 assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
142 @Test
143 public void testRandomReadCalculation() {
144 TestOptions opts = new PerformanceEvaluation.TestOptions();
145 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
146 int rows = opts.getPerClientRunRows();
147 // Default row count
148 final int defaultPerClientRunRows = 1024 * 1024;
149 assertEquals(defaultPerClientRunRows, rows);
150 // If size is 2G, then twice the row count.
151 opts.setSize(2.0f);
152 opts.setPerClientRunRows(1000);
153 opts.setCmdName(PerformanceEvaluation.RANDOM_READ);
154 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
155 assertEquals(1000, opts.getPerClientRunRows());
156 // If two clients, then they get half the rows each.
157 opts.setNumClientThreads(2);
158 opts = PerformanceEvaluation.calculateRowsAndSize(opts);
159 assertEquals(1000, opts.getPerClientRunRows());
160 // assuming we will get one before this loop expires
161 boolean foundValue = false;
162 Random rand = ThreadLocalRandom.current();
163 for (int i = 0; i < 10000000; i++) {
164 int randomRow = PerformanceEvaluation.generateRandomRow(rand, opts.totalRows);
165 if (randomRow > 1000) {
166 foundValue = true;
167 break;
170 assertTrue("We need to get a value more than 1000", foundValue);
173 @Test
174 public void testZipfian() throws NoSuchMethodException, SecurityException, InstantiationException,
175 IllegalAccessException, IllegalArgumentException, InvocationTargetException {
176 TestOptions opts = new PerformanceEvaluation.TestOptions();
177 opts.setValueZipf(true);
178 final int valueSize = 1024;
179 opts.setValueSize(valueSize);
180 RandomReadTest rrt = new RandomReadTest(null, opts, null);
181 Constructor<?> ctor =
182 Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class);
183 ctor.setAccessible(true);
184 Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500));
185 for (int i = 0; i < 100; i++) {
186 histogram.update(rrt.getValueLength(null));
188 Snapshot snapshot = histogram.getSnapshot();
189 double stddev = snapshot.getStdDev();
190 assertTrue(stddev != 0 && stddev != 1.0);
191 assertTrue(snapshot.getStdDev() != 0);
192 double median = snapshot.getMedian();
193 assertTrue(median != 0 && median != 1 && median != valueSize);
196 @Test
197 public void testSetBufferSizeOption() {
198 TestOptions opts = new PerformanceEvaluation.TestOptions();
199 long bufferSize = opts.getBufferSize();
200 assertEquals(bufferSize, 2L * 1024L * 1024L);
201 opts.setBufferSize(64L * 1024L);
202 bufferSize = opts.getBufferSize();
203 assertEquals(bufferSize, 64L * 1024L);
206 @Test
207 public void testParseOptsWithThreads() {
208 Queue<String> opts = new LinkedList<>();
209 String cmdName = "sequentialWrite";
210 int threads = 1;
211 opts.offer(cmdName);
212 opts.offer(String.valueOf(threads));
213 PerformanceEvaluation.TestOptions options = PerformanceEvaluation.parseOpts(opts);
214 assertNotNull(options);
215 assertNotNull(options.getCmdName());
216 assertEquals(cmdName, options.getCmdName());
217 assertEquals(threads, options.getNumClientThreads());
220 @Test
221 public void testParseOptsWrongThreads() {
222 Queue<String> opts = new LinkedList<>();
223 String cmdName = "sequentialWrite";
224 opts.offer(cmdName);
225 opts.offer("qq");
226 try {
227 PerformanceEvaluation.parseOpts(opts);
228 } catch (IllegalArgumentException e) {
229 System.out.println(e.getMessage());
230 assertEquals("Command " + cmdName + " does not have threads number", e.getMessage());
231 assertTrue(e.getCause() instanceof NumberFormatException);
235 @Test
236 public void testParseOptsNoThreads() {
237 Queue<String> opts = new LinkedList<>();
238 String cmdName = "sequentialWrite";
239 try {
240 PerformanceEvaluation.parseOpts(opts);
241 } catch (IllegalArgumentException e) {
242 System.out.println(e.getMessage());
243 assertEquals("Command " + cmdName + " does not have threads number", e.getMessage());
244 assertTrue(e.getCause() instanceof NoSuchElementException);
248 @Test
249 public void testParseOptsMultiPuts() {
250 Queue<String> opts = new LinkedList<>();
251 String cmdName = "sequentialWrite";
252 opts.offer("--multiPut=10");
253 opts.offer(cmdName);
254 opts.offer("64");
255 PerformanceEvaluation.TestOptions options = null;
256 try {
257 options = PerformanceEvaluation.parseOpts(opts);
258 fail("should fail");
259 } catch (IllegalArgumentException e) {
260 System.out.println(e.getMessage());
263 //Re-create options
264 opts = new LinkedList<>();
265 opts.offer("--autoFlush=true");
266 opts.offer("--multiPut=10");
267 opts.offer(cmdName);
268 opts.offer("64");
270 options = PerformanceEvaluation.parseOpts(opts);
271 assertNotNull(options);
272 assertNotNull(options.getCmdName());
273 assertEquals(cmdName, options.getCmdName());
274 assertEquals(10, options.getMultiPut());
277 @Test
278 public void testParseOptsMultiPutsAndAutoFlushOrder() {
279 Queue<String> opts = new LinkedList<>();
280 String cmdName = "sequentialWrite";
281 String cmdMultiPut = "--multiPut=10";
282 String cmdAutoFlush = "--autoFlush=true";
283 opts.offer(cmdAutoFlush);
284 opts.offer(cmdMultiPut);
285 opts.offer(cmdName);
286 opts.offer("64");
287 PerformanceEvaluation.TestOptions options = null;
288 options = PerformanceEvaluation.parseOpts(opts);
289 assertNotNull(options);
290 assertEquals(true, options.autoFlush);
291 assertEquals(10, options.getMultiPut());
293 // Change the order of AutoFlush and Multiput
294 opts = new LinkedList<>();
295 opts.offer(cmdMultiPut);
296 opts.offer(cmdAutoFlush);
297 opts.offer(cmdName);
298 opts.offer("64");
300 options = null;
301 options = PerformanceEvaluation.parseOpts(opts);
302 assertNotNull(options);
303 assertEquals(10, options.getMultiPut());
304 assertEquals(true, options.autoFlush);
307 @Test
308 public void testParseOptsConnCount() {
309 Queue<String> opts = new LinkedList<>();
310 String cmdName = "sequentialWrite";
311 opts.offer("--oneCon=true");
312 opts.offer("--connCount=10");
313 opts.offer(cmdName);
314 opts.offer("64");
315 PerformanceEvaluation.TestOptions options = null;
316 try {
317 options = PerformanceEvaluation.parseOpts(opts);
318 fail("should fail");
319 } catch (IllegalArgumentException e) {
320 System.out.println(e.getMessage());
323 opts = new LinkedList<>();
324 opts.offer("--connCount=10");
325 opts.offer(cmdName);
326 opts.offer("64");
328 options = PerformanceEvaluation.parseOpts(opts);
329 assertNotNull(options);
330 assertNotNull(options.getCmdName());
331 assertEquals(cmdName, options.getCmdName());
332 assertEquals(10, options.getConnCount());
335 @Test
336 public void testParseOptsValueRandom() {
337 Queue<String> opts = new LinkedList<>();
338 String cmdName = "sequentialWrite";
339 opts.offer("--valueRandom");
340 opts.offer("--valueZipf");
341 opts.offer(cmdName);
342 opts.offer("64");
343 PerformanceEvaluation.TestOptions options = null;
344 try {
345 options = PerformanceEvaluation.parseOpts(opts);
346 fail("should fail");
347 } catch (IllegalStateException e) {
348 System.out.println(e.getMessage());
351 opts = new LinkedList<>();
352 opts.offer("--valueRandom");
353 opts.offer(cmdName);
354 opts.offer("64");
356 options = PerformanceEvaluation.parseOpts(opts);
358 assertNotNull(options);
359 assertNotNull(options.getCmdName());
360 assertEquals(cmdName, options.getCmdName());
361 assertEquals(true, options.valueRandom);