/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org
.apache
.hadoop
.hbase
.tool
;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.ArgumentMatcher;
64 @Category({ LargeTests
.class })
65 public class TestCanaryTool
{
68 public static final HBaseClassTestRule CLASS_RULE
=
69 HBaseClassTestRule
.forClass(TestCanaryTool
.class);
71 private HBaseTestingUtil testingUtility
;
72 private static final byte[] FAMILY
= Bytes
.toBytes("f");
73 private static final byte[] COLUMN
= Bytes
.toBytes("col");
76 public TestName name
= new TestName();
78 private org
.apache
.logging
.log4j
.core
.Appender mockAppender
;
81 public void setUp() throws Exception
{
82 testingUtility
= new HBaseTestingUtil();
83 testingUtility
.startMiniCluster();
84 mockAppender
= mock(org
.apache
.logging
.log4j
.core
.Appender
.class);
85 when(mockAppender
.getName()).thenReturn("mockAppender");
86 when(mockAppender
.isStarted()).thenReturn(true);
87 ((org
.apache
.logging
.log4j
.core
.Logger
) org
.apache
.logging
.log4j
.LogManager
88 .getLogger("org.apache.hadoop.hbase")).addAppender(mockAppender
);
92 public void tearDown() throws Exception
{
93 testingUtility
.shutdownMiniCluster();
94 ((org
.apache
.logging
.log4j
.core
.Logger
) org
.apache
.logging
.log4j
.LogManager
95 .getLogger("org.apache.hadoop.hbase")).removeAppender(mockAppender
);
99 public void testBasicZookeeperCanaryWorks() throws Exception
{
100 final String
[] args
= { "-t", "10000", "-zookeeper" };
101 testZookeeperCanaryWithArgs(args
);
105 public void testZookeeperCanaryPermittedFailuresArgumentWorks() throws Exception
{
106 final String
[] args
=
107 { "-t", "10000", "-zookeeper", "-treatFailureAsError", "-permittedZookeeperFailures", "1" };
108 testZookeeperCanaryWithArgs(args
);
112 public void testBasicCanaryWorks() throws Exception
{
113 final TableName tableName
= TableName
.valueOf(name
.getMethodName());
114 Table table
= testingUtility
.createTable(tableName
, new byte[][] { FAMILY
});
115 // insert some test rows
116 for (int i
= 0; i
< 1000; i
++) {
117 byte[] iBytes
= Bytes
.toBytes(i
);
118 Put p
= new Put(iBytes
);
119 p
.addColumn(FAMILY
, COLUMN
, iBytes
);
122 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
123 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
124 CanaryTool canary
= new CanaryTool(executor
, sink
);
125 String
[] args
= { "-writeSniffing", "-t", "10000", tableName
.getNameAsString() };
126 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
127 assertEquals("verify no read error count", 0, canary
.getReadFailures().size());
128 assertEquals("verify no write error count", 0, canary
.getWriteFailures().size());
129 verify(sink
, atLeastOnce()).publishReadTiming(isA(ServerName
.class), isA(RegionInfo
.class),
130 isA(ColumnFamilyDescriptor
.class), anyLong());
134 public void testCanaryRegionTaskReadAllCF() throws Exception
{
135 final TableName tableName
= TableName
.valueOf(name
.getMethodName());
136 Table table
= testingUtility
.createTable(tableName
,
137 new byte[][] { Bytes
.toBytes("f1"), Bytes
.toBytes("f2") });
138 // insert some test rows
139 for (int i
= 0; i
< 1000; i
++) {
140 byte[] iBytes
= Bytes
.toBytes(i
);
141 Put p
= new Put(iBytes
);
142 p
.addColumn(Bytes
.toBytes("f1"), COLUMN
, iBytes
);
143 p
.addColumn(Bytes
.toBytes("f2"), COLUMN
, iBytes
);
146 Configuration configuration
= HBaseConfiguration
.create(testingUtility
.getConfiguration());
147 String
[] args
= { "-t", "10000", "testCanaryRegionTaskReadAllCF" };
148 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
149 for (boolean readAllCF
: new boolean[] { true, false }) {
150 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
151 CanaryTool canary
= new CanaryTool(executor
, sink
);
152 configuration
.setBoolean(HConstants
.HBASE_CANARY_READ_ALL_CF
, readAllCF
);
153 assertEquals(0, ToolRunner
.run(configuration
, canary
, args
));
154 // the test table has two column family. If readAllCF set true,
155 // we expect read count is double of region count
156 int expectedReadCount
=
157 readAllCF ?
2 * sink
.getTotalExpectedRegions() : sink
.getTotalExpectedRegions();
158 assertEquals("canary region success count should equal total expected read count",
159 expectedReadCount
, sink
.getReadSuccessCount());
160 Map
<String
, List
<CanaryTool
.RegionTaskResult
>> regionMap
= sink
.getRegionMap();
161 assertFalse("verify region map has size > 0", regionMap
.isEmpty());
163 for (String regionName
: regionMap
.keySet()) {
164 for (CanaryTool
.RegionTaskResult res
: regionMap
.get(regionName
)) {
165 assertNotNull("verify getRegionNameAsString()", regionName
);
166 assertNotNull("verify getRegionInfo()", res
.getRegionInfo());
167 assertNotNull("verify getTableName()", res
.getTableName());
168 assertNotNull("verify getTableNameAsString()", res
.getTableNameAsString());
169 assertNotNull("verify getServerName()", res
.getServerName());
170 assertNotNull("verify getServerNameAsString()", res
.getServerNameAsString());
171 assertNotNull("verify getColumnFamily()", res
.getColumnFamily());
172 assertNotNull("verify getColumnFamilyNameAsString()", res
.getColumnFamilyNameAsString());
173 assertTrue("read from region " + regionName
+ " succeeded", res
.isReadSuccess());
174 assertTrue("read took some time", res
.getReadLatency() > -1);
181 public void testCanaryRegionTaskResult() throws Exception
{
182 TableName tableName
= TableName
.valueOf("testCanaryRegionTaskResult");
183 Table table
= testingUtility
.createTable(tableName
, new byte[][] { FAMILY
});
184 // insert some test rows
185 for (int i
= 0; i
< 1000; i
++) {
186 byte[] iBytes
= Bytes
.toBytes(i
);
187 Put p
= new Put(iBytes
);
188 p
.addColumn(FAMILY
, COLUMN
, iBytes
);
191 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
192 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
193 CanaryTool canary
= new CanaryTool(executor
, sink
);
194 String
[] args
= { "-writeSniffing", "-t", "10000", "testCanaryRegionTaskResult" };
195 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
197 assertTrue("canary should expect to scan at least 1 region",
198 sink
.getTotalExpectedRegions() > 0);
199 assertTrue("there should be no read failures", sink
.getReadFailureCount() == 0);
200 assertTrue("there should be no write failures", sink
.getWriteFailureCount() == 0);
201 assertTrue("verify read success count > 0", sink
.getReadSuccessCount() > 0);
202 assertTrue("verify write success count > 0", sink
.getWriteSuccessCount() > 0);
203 verify(sink
, atLeastOnce()).publishReadTiming(isA(ServerName
.class), isA(RegionInfo
.class),
204 isA(ColumnFamilyDescriptor
.class), anyLong());
205 verify(sink
, atLeastOnce()).publishWriteTiming(isA(ServerName
.class), isA(RegionInfo
.class),
206 isA(ColumnFamilyDescriptor
.class), anyLong());
208 assertEquals("canary region success count should equal total expected regions",
209 sink
.getReadSuccessCount() + sink
.getWriteSuccessCount(), sink
.getTotalExpectedRegions());
210 Map
<String
, List
<CanaryTool
.RegionTaskResult
>> regionMap
= sink
.getRegionMap();
211 assertFalse("verify region map has size > 0", regionMap
.isEmpty());
213 for (String regionName
: regionMap
.keySet()) {
214 for (CanaryTool
.RegionTaskResult res
: regionMap
.get(regionName
)) {
215 assertNotNull("verify getRegionNameAsString()", regionName
);
216 assertNotNull("verify getRegionInfo()", res
.getRegionInfo());
217 assertNotNull("verify getTableName()", res
.getTableName());
218 assertNotNull("verify getTableNameAsString()", res
.getTableNameAsString());
219 assertNotNull("verify getServerName()", res
.getServerName());
220 assertNotNull("verify getServerNameAsString()", res
.getServerNameAsString());
221 assertNotNull("verify getColumnFamily()", res
.getColumnFamily());
222 assertNotNull("verify getColumnFamilyNameAsString()", res
.getColumnFamilyNameAsString());
224 if (regionName
.contains(CanaryTool
.DEFAULT_WRITE_TABLE_NAME
.getNameAsString())) {
225 assertTrue("write to region " + regionName
+ " succeeded", res
.isWriteSuccess());
226 assertTrue("write took some time", res
.getWriteLatency() > -1);
228 assertTrue("read from region " + regionName
+ " succeeded", res
.isReadSuccess());
229 assertTrue("read took some time", res
.getReadLatency() > -1);
235 // Ignore this test. It fails w/ the below on some mac os x.
237 // [ERROR] TestCanaryTool.testReadTableTimeouts:216
238 // Argument(s) are different! Wanted:
239 // mockAppender.doAppend(
240 // <custom argument matcher>
242 // -> at org.apache.hadoop.hbase.tool.TestCanaryTool
243 // .testReadTableTimeouts(TestCanaryTool.java:216)
244 // Actual invocations have different arguments:
245 // mockAppender.doAppend(
246 // org.apache.log4j.spi.LoggingEvent@2055cfc1
253 public void testReadTableTimeouts() throws Exception
{
254 final TableName
[] tableNames
= new TableName
[] { TableName
.valueOf(name
.getMethodName() + "1"),
255 TableName
.valueOf(name
.getMethodName() + "2") };
256 // Create 2 test tables.
257 for (int j
= 0; j
< 2; j
++) {
258 Table table
= testingUtility
.createTable(tableNames
[j
], new byte[][] { FAMILY
});
259 // insert some test rows
260 for (int i
= 0; i
< 10; i
++) {
261 byte[] iBytes
= Bytes
.toBytes(i
+ j
);
262 Put p
= new Put(iBytes
);
263 p
.addColumn(FAMILY
, COLUMN
, iBytes
);
267 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
268 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
269 CanaryTool canary
= new CanaryTool(executor
, sink
);
270 String configuredTimeoutStr
= tableNames
[0].getNameAsString() + "=" + Long
.MAX_VALUE
+ "," +
271 tableNames
[1].getNameAsString() + "=0";
272 String
[] args
= { "-readTableTimeouts", configuredTimeoutStr
, name
.getMethodName() + "1",
273 name
.getMethodName() + "2" };
274 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
275 verify(sink
, times(tableNames
.length
)).initializeAndGetReadLatencyForTable(isA(String
.class));
276 for (int i
= 0; i
< 2; i
++) {
277 assertNotEquals("verify non-null read latency", null,
278 sink
.getReadLatencyMap().get(tableNames
[i
].getNameAsString()));
279 assertNotEquals("verify non-zero read latency", 0L,
280 sink
.getReadLatencyMap().get(tableNames
[i
].getNameAsString()));
282 // One table's timeout is set for 0 ms and thus, should lead to an error.
283 verify(mockAppender
, times(1))
284 .append(argThat(new ArgumentMatcher
<org
.apache
.logging
.log4j
.core
.LogEvent
>() {
286 public boolean matches(org
.apache
.logging
.log4j
.core
.LogEvent argument
) {
287 return argument
.getMessage().getFormattedMessage()
288 .contains("exceeded the configured read timeout.");
291 verify(mockAppender
, times(2))
292 .append(argThat(new ArgumentMatcher
<org
.apache
.logging
.log4j
.core
.LogEvent
>() {
294 public boolean matches(org
.apache
.logging
.log4j
.core
.LogEvent argument
) {
295 return argument
.getMessage().getFormattedMessage().contains("Configured read timeout");
301 public void testWriteTableTimeout() throws Exception
{
302 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
303 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
304 CanaryTool canary
= new CanaryTool(executor
, sink
);
305 String
[] args
= { "-writeSniffing", "-writeTableTimeout", String
.valueOf(Long
.MAX_VALUE
) };
306 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
307 assertNotEquals("verify non-null write latency", null, sink
.getWriteLatency());
308 assertNotEquals("verify non-zero write latency", 0L, sink
.getWriteLatency());
309 verify(mockAppender
, times(1))
310 .append(argThat(new ArgumentMatcher
<org
.apache
.logging
.log4j
.core
.LogEvent
>() {
312 public boolean matches(org
.apache
.logging
.log4j
.core
.LogEvent argument
) {
313 return argument
.getMessage().getFormattedMessage().contains("Configured write timeout");
318 // no table created, so there should be no regions
320 public void testRegionserverNoRegions() throws Exception
{
321 runRegionserverCanary();
323 .append(argThat(new ArgumentMatcher
<org
.apache
.logging
.log4j
.core
.LogEvent
>() {
325 public boolean matches(org
.apache
.logging
.log4j
.core
.LogEvent argument
) {
326 return argument
.getMessage().getFormattedMessage()
327 .contains("Regionserver not serving any regions");
332 // by creating a table, there shouldn't be any region servers not serving any regions
334 public void testRegionserverWithRegions() throws Exception
{
335 final TableName tableName
= TableName
.valueOf(name
.getMethodName());
336 testingUtility
.createTable(tableName
, new byte[][] { FAMILY
});
337 runRegionserverCanary();
338 verify(mockAppender
, never())
339 .append(argThat(new ArgumentMatcher
<org
.apache
.logging
.log4j
.core
.LogEvent
>() {
341 public boolean matches(org
.apache
.logging
.log4j
.core
.LogEvent argument
) {
342 return argument
.getMessage().getFormattedMessage()
343 .contains("Regionserver not serving any regions");
349 public void testRawScanConfig() throws Exception
{
350 final TableName tableName
= TableName
.valueOf(name
.getMethodName());
351 Table table
= testingUtility
.createTable(tableName
, new byte[][] { FAMILY
});
352 // insert some test rows
353 for (int i
= 0; i
< 1000; i
++) {
354 byte[] iBytes
= Bytes
.toBytes(i
);
355 Put p
= new Put(iBytes
);
356 p
.addColumn(FAMILY
, COLUMN
, iBytes
);
359 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
360 CanaryTool
.RegionStdOutSink sink
= spy(new CanaryTool
.RegionStdOutSink());
361 CanaryTool canary
= new CanaryTool(executor
, sink
);
362 String
[] args
= { "-t", "10000", name
.getMethodName() };
363 org
.apache
.hadoop
.conf
.Configuration conf
=
364 new org
.apache
.hadoop
.conf
.Configuration(testingUtility
.getConfiguration());
365 conf
.setBoolean(HConstants
.HBASE_CANARY_READ_RAW_SCAN_KEY
, true);
366 assertEquals(0, ToolRunner
.run(conf
, canary
, args
));
367 verify(sink
, atLeastOnce()).publishReadTiming(isA(ServerName
.class), isA(RegionInfo
.class),
368 isA(ColumnFamilyDescriptor
.class), anyLong());
369 assertEquals("verify no read error count", 0, canary
.getReadFailures().size());
372 private void runRegionserverCanary() throws Exception
{
373 ExecutorService executor
= new ScheduledThreadPoolExecutor(1);
374 CanaryTool canary
= new CanaryTool(executor
, new CanaryTool
.RegionServerStdOutSink());
375 String
[] args
= { "-t", "10000", "-regionserver" };
376 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
377 assertEquals("verify no read error count", 0, canary
.getReadFailures().size());
380 private void testZookeeperCanaryWithArgs(String
[] args
) throws Exception
{
381 String hostPort
= testingUtility
.getZkCluster().getAddress().toString();
382 testingUtility
.getConfiguration().set(HConstants
.ZOOKEEPER_QUORUM
, hostPort
);
383 ExecutorService executor
= new ScheduledThreadPoolExecutor(2);
384 CanaryTool
.ZookeeperStdOutSink sink
= spy(new CanaryTool
.ZookeeperStdOutSink());
385 CanaryTool canary
= new CanaryTool(executor
, sink
);
386 assertEquals(0, ToolRunner
.run(testingUtility
.getConfiguration(), canary
, args
));
388 String baseZnode
= testingUtility
.getConfiguration().get(HConstants
.ZOOKEEPER_ZNODE_PARENT
,
389 HConstants
.DEFAULT_ZOOKEEPER_ZNODE_PARENT
);
390 verify(sink
, atLeastOnce()).publishReadTiming(eq(baseZnode
), eq(hostPort
), anyLong());