/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Random;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.PerformanceEvaluation.RandomReadTest;
import org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hbase.thirdparty.com.google.gson.Gson;
57 @Category({MiscTests
.class, SmallTests
.class})
58 public class TestPerformanceEvaluation
{
60 public static final HBaseClassTestRule CLASS_RULE
=
61 HBaseClassTestRule
.forClass(TestPerformanceEvaluation
.class);
63 private static final HBaseTestingUtility HTU
= new HBaseTestingUtility();
66 public void testDefaultInMemoryCompaction() {
67 PerformanceEvaluation
.TestOptions defaultOpts
=
68 new PerformanceEvaluation
.TestOptions();
69 assertEquals(CompactingMemStore
.COMPACTING_MEMSTORE_TYPE_DEFAULT
,
70 defaultOpts
.getInMemoryCompaction().toString());
71 TableDescriptor tableDescriptor
= PerformanceEvaluation
.getTableDescriptor(defaultOpts
);
72 for (ColumnFamilyDescriptor familyDescriptor
: tableDescriptor
.getColumnFamilies()) {
73 assertEquals(CompactingMemStore
.COMPACTING_MEMSTORE_TYPE_DEFAULT
,
74 familyDescriptor
.getInMemoryCompaction().toString());
79 public void testSerialization() {
80 PerformanceEvaluation
.TestOptions options
= new PerformanceEvaluation
.TestOptions();
81 assertFalse(options
.isAutoFlush());
82 options
.setAutoFlush(true);
83 Gson gson
= GsonUtil
.createGson().create();
84 String optionsString
= gson
.toJson(options
);
85 PerformanceEvaluation
.TestOptions optionsDeserialized
=
86 gson
.fromJson(optionsString
, PerformanceEvaluation
.TestOptions
.class);
87 assertTrue(optionsDeserialized
.isAutoFlush());
91 * Exercise the mr spec writing. Simple assertions to make sure it is basically working.
94 public void testWriteInputFile() throws IOException
{
95 TestOptions opts
= new PerformanceEvaluation
.TestOptions();
96 final int clients
= 10;
97 opts
.setNumClientThreads(clients
);
98 opts
.setPerClientRunRows(10);
100 PerformanceEvaluation
.writeInputFile(HTU
.getConfiguration(), opts
, HTU
.getDataTestDir());
101 FileSystem fs
= FileSystem
.get(HTU
.getConfiguration());
102 Path p
= new Path(dir
, PerformanceEvaluation
.JOB_INPUT_FILENAME
);
103 long len
= fs
.getFileStatus(p
).getLen();
105 byte[] content
= new byte[(int) len
];
106 try (FSDataInputStream dis
= fs
.open(p
)) {
107 dis
.readFully(content
);
108 BufferedReader br
= new BufferedReader(
109 new InputStreamReader(new ByteArrayInputStream(content
), StandardCharsets
.UTF_8
));
111 while (br
.readLine() != null) {
114 assertEquals(clients
, count
);
119 public void testSizeCalculation() {
120 TestOptions opts
= new PerformanceEvaluation
.TestOptions();
121 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
122 int rows
= opts
.getPerClientRunRows();
124 final int defaultPerClientRunRows
= 1024 * 1024;
125 assertEquals(defaultPerClientRunRows
, rows
);
126 // If size is 2G, then twice the row count.
128 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
129 assertEquals(defaultPerClientRunRows
* 2, opts
.getPerClientRunRows());
130 // If two clients, then they get half the rows each.
131 opts
.setNumClientThreads(2);
132 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
133 assertEquals(defaultPerClientRunRows
, opts
.getPerClientRunRows());
134 // What if valueSize is 'random'? Then half of the valueSize so twice the rows.
135 opts
.valueRandom
= true;
136 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
137 assertEquals(defaultPerClientRunRows
* 2, opts
.getPerClientRunRows());
141 public void testRandomReadCalculation() {
142 TestOptions opts
= new PerformanceEvaluation
.TestOptions();
143 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
144 int rows
= opts
.getPerClientRunRows();
146 final int defaultPerClientRunRows
= 1024 * 1024;
147 assertEquals(defaultPerClientRunRows
, rows
);
148 // If size is 2G, then twice the row count.
150 opts
.setPerClientRunRows(1000);
151 opts
.setCmdName(PerformanceEvaluation
.RANDOM_READ
);
152 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
153 assertEquals(1000, opts
.getPerClientRunRows());
154 // If two clients, then they get half the rows each.
155 opts
.setNumClientThreads(2);
156 opts
= PerformanceEvaluation
.calculateRowsAndSize(opts
);
157 assertEquals(1000, opts
.getPerClientRunRows());
158 Random random
= new Random();
159 // assuming we will get one before this loop expires
160 boolean foundValue
= false;
161 for (int i
= 0; i
< 10000000; i
++) {
162 int randomRow
= PerformanceEvaluation
.generateRandomRow(random
, opts
.totalRows
);
163 if (randomRow
> 1000) {
168 assertTrue("We need to get a value more than 1000", foundValue
);
172 public void testZipfian() throws NoSuchMethodException
, SecurityException
, InstantiationException
,
173 IllegalAccessException
, IllegalArgumentException
, InvocationTargetException
{
174 TestOptions opts
= new PerformanceEvaluation
.TestOptions();
175 opts
.setValueZipf(true);
176 final int valueSize
= 1024;
177 opts
.setValueSize(valueSize
);
178 RandomReadTest rrt
= new RandomReadTest(null, opts
, null);
179 Constructor
<?
> ctor
=
180 Histogram
.class.getDeclaredConstructor(com
.codahale
.metrics
.Reservoir
.class);
181 ctor
.setAccessible(true);
182 Histogram histogram
= (Histogram
)ctor
.newInstance(new UniformReservoir(1024 * 500));
183 for (int i
= 0; i
< 100; i
++) {
184 histogram
.update(rrt
.getValueLength(null));
186 Snapshot snapshot
= histogram
.getSnapshot();
187 double stddev
= snapshot
.getStdDev();
188 assertTrue(stddev
!= 0 && stddev
!= 1.0);
189 assertTrue(snapshot
.getStdDev() != 0);
190 double median
= snapshot
.getMedian();
191 assertTrue(median
!= 0 && median
!= 1 && median
!= valueSize
);
195 public void testSetBufferSizeOption() {
196 TestOptions opts
= new PerformanceEvaluation
.TestOptions();
197 long bufferSize
= opts
.getBufferSize();
198 assertEquals(bufferSize
, 2L * 1024L * 1024L);
199 opts
.setBufferSize(64L * 1024L);
200 bufferSize
= opts
.getBufferSize();
201 assertEquals(bufferSize
, 64L * 1024L);
205 public void testParseOptsWithThreads() {
206 Queue
<String
> opts
= new LinkedList
<>();
207 String cmdName
= "sequentialWrite";
210 opts
.offer(String
.valueOf(threads
));
211 PerformanceEvaluation
.TestOptions options
= PerformanceEvaluation
.parseOpts(opts
);
212 assertNotNull(options
);
213 assertNotNull(options
.getCmdName());
214 assertEquals(cmdName
, options
.getCmdName());
215 assertEquals(threads
, options
.getNumClientThreads());
219 public void testParseOptsWrongThreads() {
220 Queue
<String
> opts
= new LinkedList
<>();
221 String cmdName
= "sequentialWrite";
225 PerformanceEvaluation
.parseOpts(opts
);
226 } catch (IllegalArgumentException e
) {
227 System
.out
.println(e
.getMessage());
228 assertEquals("Command " + cmdName
+ " does not have threads number", e
.getMessage());
229 assertTrue(e
.getCause() instanceof NumberFormatException
);
234 public void testParseOptsNoThreads() {
235 Queue
<String
> opts
= new LinkedList
<>();
236 String cmdName
= "sequentialWrite";
238 PerformanceEvaluation
.parseOpts(opts
);
239 } catch (IllegalArgumentException e
) {
240 System
.out
.println(e
.getMessage());
241 assertEquals("Command " + cmdName
+ " does not have threads number", e
.getMessage());
242 assertTrue(e
.getCause() instanceof NoSuchElementException
);
247 public void testParseOptsMultiPuts() {
248 Queue
<String
> opts
= new LinkedList
<>();
249 String cmdName
= "sequentialWrite";
250 opts
.offer("--multiPut=10");
253 PerformanceEvaluation
.TestOptions options
= null;
255 options
= PerformanceEvaluation
.parseOpts(opts
);
257 } catch (IllegalArgumentException e
) {
258 System
.out
.println(e
.getMessage());
260 ((LinkedList
<String
>) opts
).offerFirst("--multiPut=10");
261 ((LinkedList
<String
>) opts
).offerFirst("--autoFlush=true");
262 options
= PerformanceEvaluation
.parseOpts(opts
);
263 assertNotNull(options
);
264 assertNotNull(options
.getCmdName());
265 assertEquals(cmdName
, options
.getCmdName());
266 assertEquals(10, options
.getMultiPut());
270 public void testParseOptsConnCount() {
271 Queue
<String
> opts
= new LinkedList
<>();
272 String cmdName
= "sequentialWrite";
273 opts
.offer("--oneCon=true");
274 opts
.offer("--connCount=10");
277 PerformanceEvaluation
.TestOptions options
= null;
279 options
= PerformanceEvaluation
.parseOpts(opts
);
281 } catch (IllegalArgumentException e
) {
282 System
.out
.println(e
.getMessage());
284 ((LinkedList
<String
>) opts
).offerFirst("--connCount=10");
285 options
= PerformanceEvaluation
.parseOpts(opts
);
286 assertNotNull(options
);
287 assertNotNull(options
.getCmdName());
288 assertEquals(cmdName
, options
.getCmdName());
289 assertEquals(10, options
.getConnCount());