/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.impl.Jdk14Logger;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.MasterRegistry;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.log4j.LogManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.impl.Log4jLoggerAdapter;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/**
 * Facility for testing HBase. Replacement for
 * old HBaseTestCase and HBaseClusterTestCase functionality.
 * Create an instance and keep it around while testing HBase. This class is
 * meant to be your one-stop shop for anything you might need while testing. Manages
 * one cluster at a time only. The managed cluster can be an in-process
 * {@link MiniHBaseCluster}, or a deployed cluster of type {@code DistributedHBaseCluster}.
 * Not all methods work with the real cluster.
 * Depends on log4j being on the classpath and
 * hbase-site.xml for logging and test-run configuration. It does not set
 * logging levels.
 * In the configuration properties, default values for master-info-port and
 * region-server-port are overridden such that a random port will be assigned (thus
 * avoiding port contention if another local HBase instance is already running).
 * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
 * setting it to true.
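 *
 * <p>A minimal usage sketch (the table and family names below are illustrative only):
 * <pre>{@code
 * HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 * TEST_UTIL.startMiniCluster();
 * Table table = TEST_UTIL.createTable(TableName.valueOf("test"), "cf");
 * // ... exercise the cluster ...
 * TEST_UTIL.shutdownMiniCluster();
 * }</pre>
 */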
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseZKTestingUtility {
  /**
   * System property key to get test directory value. Name is as it is because mini dfs has
   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
   * used in mini dfs.
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split
   * table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;

  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;

  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /** Directory on test filesystem where we put the data for this instance of
   * HBaseTestingUtility */
  private Path dataTestDirOnTestFS = null;

  private volatile AsyncClusterConnection asyncConnection;

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
  /**
   * Checks to see if a specific port is available.
   *
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }

      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }

    return false;
  }
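  // Sketch of how available(port) can be used: probe upward from an arbitrary
  // starting port (the starting value here is illustrative) until a free one is found.
  //   int port = 54321;
  //   while (!available(port)) {
  //     port++;
  //   }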
  /**
   * Create all combinations of Bloom filters and compression algorithms for
   * testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }
  /**
   * Create combination of memstoreTS and tags
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }
  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();
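  // Sketch of how these combination lists are typically consumed by a JUnit
  // parameterized test (the test class shown is hypothetical, not part of this file):
  //
  //   @RunWith(Parameterized.class)
  //   public class TestEncodings {
  //     @Parameterized.Parameters
  //     public static Collection<Object[]> data() {
  //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //     }
  //     public TestEncodings(Compression.Algorithm algo, BloomType bloom) { /* ... */ }
  //   }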
  /**
   * <p>Create an HBaseTestingUtility using a default configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }
  /**
   * <p>Create an HBaseTestingUtility using a given configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtility(@Nullable Configuration conf) {
    super(conf);

    // an hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, set it to true, making random port
    // assignment the default that tests must explicitly opt out of
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
        this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }
  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion)r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }
  /**
   * Returns this class's instance of {@link Configuration}. Be careful how
   * you use the returned Configuration since {@link Connection} instances
   * can be shared. The Map of Connections is keyed by the Configuration. If
   * say, a Connection was being used against a cluster that had been shutdown,
   * see {@link #shutdownMiniCluster()}, then the Connection will no longer
   * be wholesome. Rather than using the returned Configuration directly, it's
   * usually best to make a copy and use that. Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if
   * we need to. It needs to amend the {@link #TEST_DIRECTORY_KEY}
   * System property, as that is what minidfscluster bases
   * its data dir on. Modifying a System property is not the way to do concurrent
   * instances -- another instance could grab the temporary
   * value unintentionally -- but nothing can be done about it at the moment;
   * single instance only is how the minidfscluster works.
   *
   * We also create the underlying directory names for
   * hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values
   * in the conf, and as a system property for hadoop.tmp.dir (We do not create them!).
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");
    return testPath;
  }
  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      // that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }
  /**
   * @return Where to write test data on the test filesystem; Returns working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }
  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }
  /**
   * Sets up a path in test filesystem to be used by tests.
   * Creates a new directory if not already setup.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }
  /**
   * Sets up a new path in test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
    }
    return newDataTestDir;
  }
  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }
  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }
  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanodes on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
      throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }
  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @throws Exception
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }
  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    TraceUtil.initTracer(conf);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }
  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }
  /** This is used before starting HDFS and map-reduce mini-clusters */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());
  }
  /**
   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
   * new column families. Defaults to false.
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }
  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   * This allows one to specify this parameter on the command line.
   * If not set, the default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }
  /** Enable the short circuit read, unless configured differently.
   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }
  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }
  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
        .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
   *   HDFS data node number.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      boolean createRootDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
   *   HDFS data node number.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numSlaves)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
   *   HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numSlaves).rsClass(rsClass)
        .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
   *   HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
   *   HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass)
        .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
        .createRootDir(createRootDir).createWALDir(createWALDir)
        .build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
   * @see #startMiniCluster(StartMiniClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
   * Option default values can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniCluster(StartMiniClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(StartMiniClusterOption.builder().build());
  }
  /**
   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
   * It modifies Configuration. It homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
    LOG.info("Starting up minicluster with option: {}", option);

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster(option.getNumZkServers());
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(option);
  }
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * This is useful when doing stepped startup of clusters.
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(option.isCreateRootDir());
    if (option.isCreateWALDir()) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the master wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
    }

    Configuration c = new Configuration(this.conf);
    TraceUtil.initTracer(c);
    this.hbaseCluster =
        new MiniHBaseCluster(c, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
            option.getRsClass());
    // Populate the master address configuration from mini cluster configuration.
    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getAdmin(); // create immediately the hbaseAdmin
    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());

    return (MiniHBaseCluster) hbaseCluster;
  }
  /**
   * Starts up mini hbase cluster using default options.
   * Default options can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
  }
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
      throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServer should use.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServer should use.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(numMasters).masterClass(masterClass)
        .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
        .createRootDir(createRootDir).createWALDir(createWALDir).build();
    return startMiniHBaseCluster(option);
  }
  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.restartHBaseCluster(servers, null);
  }

  public void restartHBaseCluster(int servers, List<Integer> ports)
      throws IOException, InterruptedException {
    StartMiniClusterOption option =
        StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
    restartHBaseCluster(option);
    invalidateConnection();
  }
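  // Sketch: bounce just HBase between test phases while keeping dfs/zk up
  // (the region server count is arbitrary):
  //   TEST_UTIL.shutdownMiniHBaseCluster();
  //   TEST_UTIL.restartHBaseCluster(3);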
  public void restartHBaseCluster(StartMiniClusterOption option)
      throws IOException, InterruptedException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    if (this.asyncConnection != null) {
      this.asyncConnection.close();
      this.asyncConnection = null;
    }
    this.hbaseCluster =
        new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
            option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
            option.getRsClass());
    // Don't leave here till we've done a successful scan of the hbase:meta
    Connection conn = ConnectionFactory.createConnection(this.conf);
    Table t = conn.getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
    conn.close();
  }
  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster)this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
        MiniHBaseCluster.class.getName());
  }
  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    shutdownMiniDFSCluster();
    shutdownMiniZKCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
  /**
   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case command is unsuccessful
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
  /**
   * Abruptly shut down the HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException throws in case command is unsuccessful
   */
  public void killMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      getMiniHBaseCluster().killAll();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
  // close hbase admin, close current connection and reset MIN MAX configs for RS.
  private void cleanup() throws IOException {
    closeConnection();
    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  }
  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched irrespective of whether it has been fetched
   * before or not. If false, the previous path is used.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }
  /**
   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
   * except that the <code>create</code> flag is false.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }
  /**
   * Creates an hbase rootdir in user home directory. Also creates hbase
   * version file. Normally you won't make use of this method. Root hbasedir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operation.
   * @param create This flag decides whether to get a new
   * root or data directory path or not, if it has been fetched already.
   * Note : Directory will be made irrespective of whether path has been fetched or not.
   * If directory already exists, it will be overwritten
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
  /**
   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
   * except that the <code>create</code> flag is false.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }
  /**
   * Creates a hbase walDir in the user's home directory.
   * Normally you won't make use of this method. Root hbaseWALDir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operation.
   *
   * @return Fully qualified path to the created WAL root dir
   * @throws IOException
   */
  public Path createWALRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path walDir = getNewDataTestDirOnTestFS();
    FSUtils.setWALRootDir(this.conf, walDir);
    fs.mkdirs(walDir);
    return walDir;
  }
  private void setHBaseFsTmpDir() throws IOException {
    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
    if (hbaseFsTmpDirInString == null) {
      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
    } else {
      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
    }
  }
  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the specified table in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }
  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
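  // Usage sketch (illustrative only; assumes a started mini cluster and an existing
  // table -- the name "exampleTable" is hypothetical):
  //
  //   TableName tn = TableName.valueOf("exampleTable");
  //   util.flush(tn);          // flush memstores so data lands in HFiles
  //   util.compact(tn, true);  // then major-compact the table's regions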
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String family) throws IOException {
    return createTable(tableName, new String[] { family });
  }
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family) throws IOException {
    return createTable(tableName, new byte[][] { family });
  }
  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) {
      throw new IOException("Must create at least 3 regions");
    }
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);

    return createTable(tableName, new byte[][] { family }, splitKeys);
  }
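  // Usage sketch (illustrative only; the table name is hypothetical). The split math:
  // Bytes.split(start, end, numRegions - 3) yields numRegions - 1 split keys (the two
  // boundaries plus numRegions - 3 intermediate keys), and n split keys produce n + 1
  // regions, hence exactly numRegions regions:
  //
  //   Table t = util.createMultiRegionTable(
  //     TableName.valueOf("exampleMultiRegion"), Bytes.toBytes("cf"), 10);
  //   // t now has 10 regions with splits between "aaaaa" and "zzzzz"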
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families, (byte[][]) null);
  }
  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
  }
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
      throws IOException {
    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
  }
  /**
   * @param tableName the table name
   * @param families the families
   * @param splitKeys the split keys
   * @param replicaCount the region replica count
   * @return A Table instance for the created table.
   * @throws IOException throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      int replicaCount) throws IOException {
    return createTable(tableName, families, splitKeys, replicaCount,
      new Configuration(getConfiguration()));
  }
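  // Usage sketch (illustrative only; table name hypothetical). Creating a table with
  // three region replicas and no predefined splits:
  //
  //   Table t = util.createTable(TableName.valueOf("exampleReplicated"),
  //     new byte[][] { Bytes.toBytes("cf") }, (byte[][]) null, 3);
  //   // reads may then be issued with Consistency.TIMELINE against the replicas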
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);

    getAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta;
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    return createTable(htd, families, null, c);
  }
  /**
   * @param htd table descriptor
   * @param families array of column families
   * @param splitKeys array of split keys
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      Configuration c) throws IOException {
    // Disable blooms (they are on by default as of 0.95) but we disable them here because
    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
    // on is interfering.
    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
  }
  /**
   * @param htd table descriptor
   * @param families array of column families
   * @param splitKeys array of split keys
   * @param type Bloom type
   * @param blockSize block size
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      BloomType type, int blockSize, Configuration c) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setBloomFilterType(type)
        .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        cfdb.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfdb.build());
    }
    TableDescriptor td = builder.build();
    if (splitKeys != null) {
      getAdmin().createTable(td, splitKeys);
    } else {
      getAdmin().createTable(td);
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(td.getTableName());
    return getConnection().getTable(td.getTableName());
  }
  /**
   * @param htd table descriptor
   * @param splitRows array of split keys
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableDescriptor htd, byte[][] splitRows)
      throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    if (isNewVersionBehaviorEnabled()) {
      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
          .setNewVersionBehavior(true).build());
      }
    }
    if (splitRows != null) {
      getAdmin().createTable(builder.build(), splitRows);
    } else {
      getAdmin().createTable(builder.build());
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return getConnection().getTable(htd.getTableName());
  }
  /**
   * @param tableName the table name
   * @param families the families
   * @param splitKeys the split keys
   * @param replicaCount the replica count
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      int replicaCount, final Configuration c) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setRegionReplication(replicaCount);
    return createTable(htd, families, splitKeys, c);
  }
  /**
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }
  /**
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, int numVersions)
      throws IOException {
    return createTable(tableName, families, numVersions, (byte[][]) null);
  }
  /**
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, int numVersions,
      byte[][] splitKeys) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    if (splitKeys != null) {
      getAdmin().createTable(tableDescriptor, splitKeys);
    } else {
      getAdmin().createTable(tableDescriptor);
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * Create a table with multiple regions.
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
      throws IOException {
    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
  }
  /**
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize, String cpName) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    if (cpName != null) {
      tableDescriptor.setCoprocessor(cpName);
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families,
      int[] numVersions) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions[i]);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
      i++;
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
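  // Usage sketch (illustrative only; names hypothetical). Each family gets its own
  // max-versions setting, matched by position in the arrays:
  //
  //   Table t = util.createTable(TableName.valueOf("exampleVersions"),
  //     new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2") },
  //     new int[] { 1, 5 });  // cf1 keeps 1 version, cf2 keeps 5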
  /**
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
    if (isNewVersionBehaviorEnabled()) {
      familyDescriptor.setNewVersionBehavior(true);
    }
    tableDescriptor.setColumnFamily(familyDescriptor);
    getAdmin().createTable(tableDescriptor, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
  }
  /**
   * Modify a table, synchronous.
   * @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
   *   {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
   * @see Admin#modifyTable(TableDescriptor)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-22002">HBASE-22002</a>
   */
  @Deprecated
  public static void modifyTableSync(Admin admin, TableDescriptor desc)
      throws IOException, InterruptedException {
    admin.modifyTable(desc);
  }
  /**
   * Set the number of Region replicas.
   */
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
      throws IOException, InterruptedException {
    admin.disableTable(table);
    HTableDescriptor desc = new HTableDescriptor(admin.getDescriptor(table));
    desc.setRegionReplication(replicaCount);
    admin.modifyTable(desc);
    admin.enableTable(table);
  }
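  // Usage sketch (illustrative only; table name hypothetical). Note the method
  // disables and re-enables the table, so in-flight clients see a brief outage:
  //
  //   HBaseTestingUtility.setReplicas(util.getAdmin(),
  //     TableName.valueOf("exampleTable"), 3);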
  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getAdmin().deleteTable(tableName);
  }
  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTableIfAny(TableName tableName) throws IOException {
    try {
      deleteTable(tableName);
    } catch (TableNotFoundException e) {
      // ignore
    }
  }
  // ==========================================================================
  // Canned table and table descriptor creation
  // TODO replace HBaseTestCase

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
  public TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(
      final String name) {
    return createModifyableTableDescriptor(TableName.valueOf(name),
      HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }
  public HTableDescriptor createTableDescriptor(final TableName name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(name);
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return new HTableDescriptor(tableDescriptor);
  }
  public TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(
      final TableName name, final int minVersions, final int versions, final int ttl,
      KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(name);
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return tableDescriptor;
  }
  /**
   * Create a table descriptor of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
   */
  public HTableDescriptor createTableDescriptor(final TableName name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }
  public HTableDescriptor createTableDescriptor(final TableName tableName,
      final byte[] family) {
    return createTableDescriptor(tableName, new byte[][] { family }, 1);
  }
  public HTableDescriptor createTableDescriptor(final TableName tableName,
      byte[][] families, int maxVersions) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);

    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(maxVersions);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return new HTableDescriptor(tableDescriptor);
  }
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param desc a table descriptor indicating which table the region belongs to
   * @param startKey the start boundary of the region
   * @param endKey the end boundary of the region
   * @return a region that writes to local dir for testing
   * @throws IOException
   */
  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
      throws IOException {
    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
    return createLocalHRegion(hri, desc);
  }
  /**
   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
   */
  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
  }
  /**
   * Create an HRegion that writes to the local tmp dirs with specified wal
   * @param info regioninfo
   * @param desc table descriptor
   * @param wal wal for this region.
   * @return created hregion
   * @throws IOException
   */
  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
  }
  /**
   * @return A region on which you must call
   *   {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
   * @throws IOException
   */
  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, isReadOnly,
      durability, wal, null, families);
  }
  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
      byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal,
      boolean[] compactedMemStore, byte[]... families)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        familyDescriptor.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        familyDescriptor.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
      i++;
      // Keep all versions; tests expect to read back everything they write.
      familyDescriptor.setMaxVersions(Integer.MAX_VALUE);
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    tableDescriptor.setDurability(durability);
    HRegionInfo info =
      new HRegionInfo(tableDescriptor.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, tableDescriptor, wal);
  }
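  // Usage sketch (illustrative only; names hypothetical). Local regions write to the
  // test data dir; pair them with closeRegionAndWAL to release the region and its WAL:
  //
  //   TableDescriptor htd = TableDescriptorBuilder
  //     .newBuilder(TableName.valueOf("exampleLocal"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
  //   HRegion region = util.createLocalHRegion(htd, null, null);  // whole key range
  //   try {
  //     region.put(new Put(Bytes.toBytes("r")).addColumn(
  //       Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
  //   } finally {
  //     HBaseTestingUtility.closeRegionAndWAL(region);
  //   }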
  // ==========================================================================

  /**
   * Provide an existing table name to truncate.
   * Scans the table and issues a delete for each row read.
   * @param tableName existing table
   * @return HTable to that new table
   * @throws IOException
   */
  public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }
  /**
   * Truncate a table using the admin command.
   * Effectively disables, deletes, and recreates the table.
   * @param tableName table which must exist.
   * @param preserveRegions keep the existing split points
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
      throws IOException {
    Admin admin = getAdmin();
    if (!admin.isTableDisabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.truncateTable(tableName, preserveRegions);
    return getConnection().getTable(tableName);
  }
  /**
   * Truncate a table using the admin command.
   * Effectively disables, deletes, and recreates the table.
   * For previous behavior of issuing row deletes, see
   * {@link #deleteTableData(TableName)}.
   * Expressly does not preserve regions of existing table.
   * @param tableName table which must exist.
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName) throws IOException {
    return truncateTable(tableName, false);
  }
  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }
  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f Array of Families to load
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value,
      boolean writeToWAL) throws IOException {
    List<Put> puts = new ArrayList<>();
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        byte[] value1 = value != null ? value : row;
        put.addColumn(f[i], f[i], value1);
      }
      puts.add(put);
    }
    t.put(puts);
    return puts.size();
  }
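  // Usage sketch (illustrative only; table name hypothetical). loadTable writes one
  // row per three-letter key in ROWS ('aaa'..'zzz'), so countRows should match:
  //
  //   Table t = util.createTable(TableName.valueOf("exampleLoad"), Bytes.toBytes("cf"));
  //   int loaded = util.loadTable(t, Bytes.toBytes("cf"));
  //   assertEquals(loaded, HBaseTestingUtility.countRows(t));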
  /** A tracker for tracking and validating table rows
   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
     * all other rows none
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
              throw new RuntimeException("Row:" + row + " has a seen count of " + count +
                " instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
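  // Usage sketch (illustrative only). Feed every row returned by a bounded scan into
  // the tracker, then validate the scan covered exactly the expected key range:
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("bbb"), Bytes.toBytes("yyy"));
  //   try (ResultScanner scanner = t.getScanner(
  //       new Scan().withStartRow(Bytes.toBytes("bbb")).withStopRow(Bytes.toBytes("yyy")))) {
  //     for (Result r : scanner) {
  //       tracker.addRow(r.getRow());
  //     }
  //   }
  //   tracker.validate(); // throws RuntimeException on a missing or duplicate row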
  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final Region r, final byte[] f) throws IOException {
    return loadRegion((HRegion) r, f);
  }
  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param flush flush the cache if true
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
          if (r.getWAL() == null) {
            put.setDurability(Durability.SKIP_WAL);
          }
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flush(true);
      }
    }
    return rowCount;
  }
  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.addColumn(f, null, data);
      t.put(put);
    }
  }
  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
      throws IOException {
    Random r = new Random();
    byte[] row = new byte[rowSize];
    for (int i = 0; i < totalRows; i++) {
      r.nextBytes(row);
      Put put = new Put(row);
      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
      t.put(put);
    }
  }
  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
      int replicaId) throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Get get = new Get(data);
      get.setReplicaId(replicaId);
      get.setConsistency(Consistency.TIMELINE);
      Result result = table.get(get);
      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength()));
    }
  }
  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
      throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow);
  }

  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
      throws IOException {
    verifyNumericRows(region, f, startRow, endRow, true);
  }
  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
      final boolean present) throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
  }
  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
      final boolean present) throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Result result = region.get(new Get(data));

      boolean hasResult = result != null && !result.isEmpty();
      assertEquals(failMsg + result, present, hasResult);
      if (!present) {
        continue;
      }

      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength()));
    }
  }
  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Delete delete = new Delete(data);
      delete.addFamily(f);
      t.delete(delete);
    }
  }
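  // Usage sketch (illustrative only). The numeric-row helpers use the stringified
  // integer as both row key and cell value, so load/verify/delete compose directly:
  //
  //   util.loadNumericRows(t, Bytes.toBytes("cf"), 0, 100);
  //   util.verifyNumericRows(t, Bytes.toBytes("cf"), 0, 100, 0); // replica 0 == primary
  //   util.deleteNumericRows(t, Bytes.toBytes("cf"), 0, 100);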
  /**
   * Return the number of rows in the given table.
   * @param table to count rows
   * @return count of rows
   */
  public static int countRows(final Table table) throws IOException {
    return countRows(table, new Scan());
  }

  public static int countRows(final Table table, final Scan scan) throws IOException {
    try (ResultScanner results = table.getScanner(scan)) {
      int count = 0;
      while (results.next() != null) {
        count++;
      }
      return count;
    }
  }
  public int countRows(final Table table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    return countRows(table, scan);
  }
  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    try {
      return countRows(table);
    } finally {
      table.close();
    }
  }
  public int countRows(final Region region) throws IOException {
    return countRows(region, new Scan());
  }

  public int countRows(final Region region, final Scan scan) throws IOException {
    InternalScanner scanner = region.getScanner(scan);
    try {
      return countRows(scanner);
    } finally {
      scanner.close();
    }
  }
  public int countRows(final InternalScanner scanner) throws IOException {
    int scannedCount = 0;
    List<Cell> results = new ArrayList<>();
    boolean hasMore = true;
    while (hasMore) {
      hasMore = scanner.next(results);
      scannedCount += results.size();
      results.clear();
    }
    return scannedCount;
  }
  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final Table table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    return digest.toString();
  }
  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }
  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };
  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };
  /**
   * Create rows in hbase:meta for regions of the specified table with the specified
   * start keys. The first startKey should be a 0 length byte array if you
   * want to form a proper range of regions.
   * @return list of region info for regions added to meta
   * @throws IOException
   */
  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final TableDescriptor htd, byte[][] startKeys)
      throws IOException {
    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
    MetaTableAccessor
      .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
        .setStartKey(startKeys[i])
        .setEndKey(startKeys[j])
        .build();
      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
      newRegions.add(hri);
    }
    meta.close();
    return newRegions;
  }
  /**
   * Create an unmanaged WAL. Be sure to close it when you're through.
   */
  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
      throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless we pass it along via the conf.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd) throws IOException {
    return createRegionAndWAL(info, rootDir, conf, htd, true);
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
      throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setBlockCache(blockCache);
    region.initialize();
    return region;
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
      throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setMobFileCache(mobFileCache);
    region.initialize();
    return region;
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, boolean initialize)
      throws IOException {
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    WAL wal = createWal(conf, rootDir, info);
    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
  }
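  // Usage sketch (illustrative only; rootDir/conf/htd are assumed from the test
  // context). The static variant is handy outside a utility instance; always pair
  // it with closeRegionAndWAL:
  //
  //   RegionInfo ri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  //   HRegion region = HBaseTestingUtility.createRegionAndWAL(ri, rootDir, conf, htd);
  //   try {
  //     // exercise the region
  //   } finally {
  //     HBaseTestingUtility.closeRegionAndWAL(region);
  //   }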
  /**
   * Returns all rows from the hbase:meta table.
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaTableAccessor class
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }
  /**
   * Returns all rows from the hbase:meta table for a given user table
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaTableAccessor.
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this new hosed case.
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }
  /**
   * Returns all regions of the specified table
   * @param tableName the table name
   * @return all regions of the specified table
   * @throws IOException when getting the regions fails.
   */
  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
    try (Admin admin = getConnection().getAdmin()) {
      return admin.getRegions(tableName);
    }
  }
  /**
   * Find any other region server which is different from the one identified by parameter
   * @return another region server
   */
  public HRegionServer getOtherRegionServer(HRegionServer rs) {
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      if (!(rst.getRegionServer() == rs)) {
        return rst.getRegionServer();
      }
    }
    return null;
  }
  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   * @throws IOException
   * @throws InterruptedException
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<RegionInfo> regions = getRegions(tableName);
    if (regions == null || regions.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + regions.size() + " regions for table " +
      tableName);

    byte[] firstRegionName = regions.stream()
      .filter(r -> !r.isOffline())
      .map(RegionInfo::getRegionName)
      .findFirst()
      .orElseThrow(() -> new IOException("online regions not found in table " + tableName));

    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. Region may not be online yet. Sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }
  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   * @throws IOException When starting the cluster fails.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    // Set a very high max-disk-utilization percentage to keep the NodeManagers from failing.
    conf.setIfUnset(
      "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
      "99.0");
    startMiniMapReduceCluster(2);
    return mrCluster;
  }
  /**
   * Tasktracker has a bug where changing the hadoop.log.dir system property
   * will not change its internal static LOG_DIR variable.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException
        | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
  /**
   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
   * filesystem.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // we up the VM usable so that processes don't get killed.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
    // this avoids the problem by disabling speculative task execution in tests.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Allow the user to override FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapreduce.cluster.local.dir",
      conf.get("mapreduce.cluster.local.dir")); // Hadoop MiniMR overwrites this while it should not
    LOG.info("Mini mapreduce cluster started");

    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
    // necessary config properties here. YARN-129 required adding a few properties.
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // this for mrv2 support; mr1 ignores this
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
    String mrJobHistoryWebappAddress =
      jobConf.get("mapreduce.jobhistory.webapp.address");
    if (mrJobHistoryWebappAddress != null) {
      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
    }
    String yarnRMWebappAddress =
      jobConf.get("yarn.resourcemanager.webapp.address");
    if (yarnRMWebappAddress != null) {
      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
    }
  }
  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    if (mrCluster != null) {
      LOG.info("Stopping mini mapreduce cluster...");
      mrCluster.shutdown();
      mrCluster = null;
      LOG.info("Mini mapreduce cluster stopped");
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapreduce.jobtracker.address", "local");
  }
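  // Usage sketch (illustrative only; the job name is hypothetical). The MR mini
  // cluster piggybacks on the HBase test configuration, so jobs submitted with this
  // conf run against it:
  //
  //   util.startMiniMapReduceCluster();
  //   try {
  //     Job job = Job.getInstance(util.getConfiguration(), "exampleJob");
  //     // configure mapper/reducer, then job.waitForCompletion(true)
  //   } finally {
  //     util.shutdownMiniMapReduceCluster();
  //   }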
  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   */
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   * This version is used by TestTokenAuthentication
   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   * This version is used by TestOpenRegionHandler
   */
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }
  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz The class for which to switch to debug logging.
   */
  public void enableDebug(Class<?> clazz) {
    Logger l = LoggerFactory.getLogger(clazz);
    if (l instanceof Log4JLogger) {
      ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Log4jLoggerAdapter) {
      LogManager.getLogger(clazz).setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
      ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
  }
  /**
   * Expire the Master's session
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's session
   * @param index which RS
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }
  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spawned master
    // this.hbaseCluster shares this configuration too
    decrementMinRegionServerCount(getConfiguration());

    // each master thread keeps a copy of configuration
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }
  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }
  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }
  /**
   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
   * http://hbase.apache.org/book.html#trouble.zookeeper
   * There are issues when doing this:
   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
   *
   * @param nodeZK - the ZK watcher to expire
   * @param checkStatus - true to check if we can create a Table with the
   *                    current configuration.
   */
  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    // so we create a first watcher to be sure that the
    // event was sent. We expect that if our watcher receives the event
    // other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    // we receive all the events, so we don't have to capture the event;
    // just closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
        @Override
        public void process(WatchedEvent watchedEvent) {
          LOG.info("Monitor ZKW received event=" + watchedEvent);
        }
      }, sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    // ensure that we have connection to the server before closing down, otherwise
    // the close session event will be eaten out before we start CONNECTING state
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      getConnection().getTable(TableName.META_TABLE_NAME).close();
    }
  }
  /**
   * Get the Mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
   * tests referring this should not assume that the cluster is a mini cluster or a
   * distributed one. If the test only works on a mini cluster, then specific
   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
   * need to type-cast.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
    return hbaseCluster;
  }
  private void initConnection() throws IOException {
    User user = UserProvider.instantiate(conf).getCurrent();
    this.asyncConnection =
      ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
  }
  /**
   * Resets the connections so that the next time getConnection() is called, a new connection is
   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
   * the connection is not valid anymore.
   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
   * written, not all start() stop() calls go through this class. Most tests directly operate on
   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
   */
  public void invalidateConnection() throws IOException {
    closeConnection();
    // Update the master addresses if they changed.
    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
      masterConfigBefore, masterConfAfter);
    conf.set(HConstants.MASTER_ADDRS_KEY,
      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
  }
  /**
   * Get a Connection to the cluster. Not thread-safe (This class needs a lot of work to make it
   * thread-safe).
   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
   */
  public Connection getConnection() throws IOException {
    if (this.asyncConnection == null) {
      initConnection();
    }
    return this.asyncConnection.toConnection();
  }

  public AsyncClusterConnection getAsyncConnection() throws IOException {
    if (this.asyncConnection == null) {
      initConnection();
    }
    return this.asyncConnection;
  }
  public void closeConnection() throws IOException {
    Closeables.close(hbaseAdmin, true);
    Closeables.close(asyncConnection, true);
    this.hbaseAdmin = null;
    this.asyncConnection = null;
  }
  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
   * Closing it has no effect; it will be closed automatically when the cluster shuts down.
   */
  public synchronized Admin getAdmin() throws IOException {
    if (hbaseAdmin == null) {
      this.hbaseAdmin = getConnection().getAdmin();
    }
    return hbaseAdmin;
  }

  private Admin hbaseAdmin = null;
  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
   */
  public Hbck getHbck() throws IOException {
    return getConnection().getHbck();
  }
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(String regionName) throws IOException {
    unassignRegion(Bytes.toBytes(regionName));
  }
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(byte[] regionName) throws IOException {
    getAdmin().unassign(regionName, true);
  }
  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   */
  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
    unassignRegionByRow(Bytes.toBytes(row), table);
  }
  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   * @throws IOException if a remote or network exception occurs
   */
  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    unassignRegion(hrl.getRegion().getRegionName());
  }
  /**
   * Retrieves a splittable region randomly from tableName.
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within limit of maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      // There are chances that before we get the region for the table from an RS the region may
      // be going for CLOSE. This may be because online schema change is enabled.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }
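
  // Illustrative usage sketch (added for exposition, not part of the original class): a split
  // test might pick a splittable region and then ask the master to split, e.g.:
  //
  //   HRegion region = TEST_UTIL.getSplittableRegion(tableName, 10);
  //   if (region != null) {
  //     TEST_UTIL.getAdmin().split(tableName);
  //   }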
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }
  /**
   * Set the MiniDFSCluster.
   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   * it is set.
   * @throws IllegalStateException if the passed cluster is up when it is required to be down
   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
   */
  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
      throws IllegalStateException, IOException {
    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
    setFs();
  }
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
  /**
   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
   * (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableAvailable(TableName table)
      throws InterruptedException, IOException {
    waitTableAvailable(table.getName(), 30000);
  }
  public void waitTableAvailable(TableName table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(table));
  }
  /**
   * Wait until all regions in a table have been assigned.
   * @param table Table to wait on.
   * @param timeoutMillis Timeout.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
  }
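
  // Illustrative usage sketch (added for exposition, not part of the original class): after
  // creating a table, tests usually block until every region is online before reading or
  // writing. The table name below is a placeholder, e.g.:
  //
  //   TableName tn = TableName.valueOf("testWaitTable");
  //   TEST_UTIL.createTable(tn, Bytes.toBytes("f"));
  //   TEST_UTIL.waitTableAvailable(tn);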
  public String explainTableAvailability(TableName tableName) throws IOException {
    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
    if (getHBaseCluster().getMaster().isAlive()) {
      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().getRegionAssignments();
      final List<Pair<RegionInfo, ServerName>> metaLocations =
        MetaTableAccessor.getTableRegionsAndLocations(asyncConnection.toConnection(), tableName);
      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
        RegionInfo hri = metaLocation.getFirst();
        ServerName sn = metaLocation.getSecond();
        if (!assignments.containsKey(hri)) {
          msg += ", region " + hri + " not assigned, but found in meta, it is expected to be on " + sn;
        } else if (sn == null) {
          msg += ", region " + hri + " assigned, but has no server in meta";
        } else if (!sn.equals(assignments.get(hri))) {
          msg += ", region " + hri + " assigned, but has different servers in meta and AM ( " +
            sn + " <> " + assignments.get(hri) + ")";
        }
      }
    }
    return msg;
  }
  public String explainTableState(final TableName table, TableState.State state)
      throws IOException {
    TableState tableState = MetaTableAccessor.getTableState(asyncConnection.toConnection(), table);
    if (tableState == null) {
      return "TableState in META: No table state in META for table " + table +
        " last state in meta (including deleted is " + findLastTableState(table) + ")";
    } else if (!tableState.inStates(state)) {
      return "TableState in META: Not " + state + " state, but " + tableState;
    } else {
      return "TableState in META: OK";
    }
  }
  @Nullable
  public TableState findLastTableState(final TableName table) throws IOException {
    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        if (!Arrays.equals(r.getRow(), table.getName())) {
          return false;
        }
        TableState state = MetaTableAccessor.getTableState(r);
        if (state != null) {
          lastTableState.set(state);
        }
        return true;
      }
    };
    MetaTableAccessor.scanMeta(asyncConnection.toConnection(), null, null,
      MetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
    return lastTableState.get();
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned. Will timeout after default period (30 seconds).
   * Tolerates nonexistent table.
   * @param table the table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableEnabled(TableName table)
      throws InterruptedException, IOException {
    waitTableEnabled(table, 30000);
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned.
   * @see #waitTableEnabled(TableName, long)
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
  }
  public void waitTableEnabled(TableName table, long timeoutMillis)
      throws IOException {
    waitFor(timeoutMillis, predicateTableEnabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'.
   * Will timeout after default period (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(table, 30000);
  }
  public void waitTableDisabled(TableName table, long millisTimeout)
      throws InterruptedException, IOException {
    waitFor(millisTimeout, predicateTableDisabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'.
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked disabled.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
  }
  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException if a remote or network exception occurs
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
  /**
   * Make sure that at least the specified number of region servers
   * are running. We don't count the ones that are currently stopping or are
   * stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException if a remote or network exception occurs
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
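
  // Illustrative usage sketch (added for exposition, not part of the original class): a
  // failover test can guarantee capacity before killing servers, e.g.:
  //
  //   TEST_UTIL.ensureSomeNonStoppedRegionServersAvailable(3);
  //   TEST_UTIL.getMiniHBaseCluster().stopRegionServer(0); // at least two still serving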
  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone. Use it when getting new instances of FileSystem. Only
   * works for DistributedFileSystem w/o Kerberos.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new configuration instance with a different user set into it.
   * @throws IOException if a remote or network exception occurs
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
      return User.getCurrent();
    }
    // Else distributed filesystem. Make a new instance per daemon. Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }
  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
      throws IOException {
    NavigableSet<String> online = new TreeSet<>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (RegionInfo region :
            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      }
    }
    for (MasterThread mt : cluster.getLiveMasterThreads()) {
      try {
        for (RegionInfo region :
            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      } catch (ServerNotRunningYetException e) {
        // That's fine.
      }
    }
    return online;
  }
  /**
   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append it's hard-coded to 5 and
   * makes tests linger. Here is the exception you'll see:
   * <pre>
   * 2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/wal.1276627923013 block
   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
   * blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683
   * failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
   * </pre>
   * @param stream A DFSClient.DFSOutputStream.
   * @param max the new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
  /**
   * Uses the assignment manager directly to assign the region, and waits until the specified
   * region has completed assignment.
   * @return true if the region is assigned, false otherwise.
   */
  public boolean assignRegion(final RegionInfo regionInfo)
      throws IOException, InterruptedException {
    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
    am.assign(regionInfo);
    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
  }
  /**
   * Move region to destination server and wait till region is completely moved and online.
   * @param destRegion region to move
   * @param destServer destination server of the region
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if a remote or network exception occurs
   */
  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
      throws InterruptedException, IOException {
    HMaster master = getMiniHBaseCluster().getMaster();
    // TODO: Here we start the move. The move can take a while.
    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
    while (true) {
      ServerName serverName = master.getAssignmentManager().getRegionStates()
        .getRegionServerOfRegion(destRegion);
      if (serverName != null && serverName.equals(destServer)) {
        assertRegionOnServer(destRegion, serverName, 2000);
        break;
      }
      Thread.sleep(10);
    }
  }
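
  // Illustrative usage sketch (added for exposition, not part of the original class):
  // rebalancing a region onto a known server in a test, e.g.:
  //
  //   RegionInfo hri = TEST_UTIL.getAdmin().getRegions(tableName).get(0);
  //   ServerName target = TEST_UTIL.getMiniHBaseCluster().getRegionServer(1).getServerName();
  //   TEST_UTIL.moveRegionAndWait(hri, target);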
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to a configurable timeout value (default is 60 seconds).
   * This means all regions have been deployed,
   * master has been informed and updated hbase:meta with the regions deployed
   * server.
   * @param tableName the table name
   * @throws IOException if an IO problem is encountered
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName,
      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
  }
  /**
   * Wait until all system table's regions get assigned.
   * @throws IOException if an IO problem is encountered
   */
  public void waitUntilAllSystemRegionsAssigned() throws IOException {
    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
  }
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
   * master has been informed and updated hbase:meta with the regions deployed
   * server.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException if an IO problem is encountered
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    if (!TableName.isMetaTableName(tableName)) {
      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
          timeout + "ms");
        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
          @Override
          public String explainFailure() throws IOException {
            return explainTableAvailability(tableName);
          }

          @Override
          public boolean evaluate() throws IOException {
            Scan scan = new Scan();
            scan.addFamily(HConstants.CATALOG_FAMILY);
            boolean tableFound = false;
            try (ResultScanner s = meta.getScanner(scan)) {
              for (Result r; (r = s.next()) != null;) {
                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
                if (info != null && info.getTable().equals(tableName)) {
                  // Get server hosting this region from catalog family. Return false if no server
                  // hosting this region, or if the server hosting this region was recently killed
                  // (for fault tolerance testing).
                  tableFound = true;
                  byte[] server =
                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                  if (server == null) {
                    return false;
                  } else {
                    byte[] startCode =
                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                    ServerName serverName =
                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
                        Bytes.toLong(startCode));
                    if (!getHBaseClusterInterface().isDistributedCluster() &&
                        getHBaseCluster().isKilledRS(serverName)) {
                      return false;
                    }
                  }
                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
                    return false;
                  }
                }
              }
            }
            if (!tableFound) {
              LOG.warn(
                "Didn't find the entries for table " + tableName + " in meta, already deleted?");
            }
            return tableFound;
          }
        });
      }
    }
    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
    // check from the master state if we are using a mini cluster
    if (!getHBaseClusterInterface().isDistributedCluster()) {
      // So, all regions are in the meta table but make sure master knows of the assignments before
      // returning -- sometimes this can lag.
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
    LOG.info("All regions for table " + tableName + " assigned.");
  }
  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
      // readpoint 0.
      0);

    List<Cell> result = new ArrayList<>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      Cell kv = result.get(0);
      if (!CellUtil.matchingRows(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
  /**
   * Create region split keys between startkey and endKey.
   * @param startKey the start key
   * @param endKey the end key
   * @param numRegions the number of regions to be created. It has to be greater than 3.
   * @return resulting split keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
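
  // Illustrative usage sketch (added for exposition, not part of the original class):
  // generating per-region start keys, e.g.:
  //
  //   byte[][] splitKeys = TEST_UTIL.getRegionSplitStartKeys(
  //     Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 10);
  //   // splitKeys[0] is HConstants.EMPTY_BYTE_ARRAY, so the array has one entry
  //   // per region start key and can seed one scan/load task per region.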
  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getColumnFamilyDescriptor().getName(), columns);

    return getFromStoreFile(store, get);
  }
  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
        ++i) {
      // advance to the first position where the lists differ
    }

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }
  public static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
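
  // Illustrative note (added for exposition, not part of the original class): the cluster key
  // is the standard "quorum:clientPort:znodeParent" string, e.g. "localhost:21818:/hbase",
  // which is what tests commonly hand to another cluster when wiring it up to this one
  // (for example as a replication peer's cluster key).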
  /** Creates a random table with the given parameters */
  public Table createRandomTable(TableName tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
      "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final Table table = createTable(tableName, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.addColumn(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.addColumn(cf, qual, ts);
          } else {
            del.addColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          mutator.mutate(put);
        }

        if (!del.isEmpty()) {
          mutator.mutate(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      mutator.flush();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }
    mutator.close();

    return table;
  }
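
  // Illustrative usage sketch (added for exposition, not part of the original class): building
  // a small, noisy table for scanner tests (the table name below is a placeholder), e.g.:
  //
  //   Table t = TEST_UTIL.createRandomTable(TableName.valueOf("testRandom"),
  //     Arrays.asList("cf1", "cf2"), 3, 10, 2, 4, 100);
  //   // 2 flushes of 100 rows each over 4 regions, 10 columns per row, maxVersions=3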
  private static Random random = new Random();

  private static final PortAllocator portAllocator = new PortAllocator(random);

  public static int randomFreePort() {
    return portAllocator.randomFreePort();
  }
  static class PortAllocator {
    private static final int MIN_RANDOM_PORT = 0xc000;
    private static final int MAX_RANDOM_PORT = 0xfffe;

    /** A set of ports that have been claimed using {@link #randomFreePort()}. */
    private final Set<Integer> takenRandomPorts = new HashSet<>();

    private final Random random;
    private final AvailablePortChecker portChecker;

    public PortAllocator(Random random) {
      this.random = random;
      this.portChecker = new AvailablePortChecker() {
        @Override
        public boolean available(int port) {
          try {
            ServerSocket sock = new ServerSocket(port);
            sock.close();
            return true;
          } catch (IOException ex) {
            return false;
          }
        }
      };
    }

    public PortAllocator(Random random, AvailablePortChecker portChecker) {
      this.random = random;
      this.portChecker = portChecker;
    }

    /**
     * Returns a random free port and marks that port as taken. Not thread-safe. Expected to be
     * called from single-threaded test setup code.
     */
    public int randomFreePort() {
      int port = 0;
      do {
        port = randomPort();
        if (takenRandomPorts.contains(port)) {
          port = 0;
          continue;
        }
        takenRandomPorts.add(port);

        if (!portChecker.available(port)) {
          port = 0;
        }
      } while (port == 0);
      return port;
    }

    /**
     * Returns a random port. These ports cannot be registered with IANA and are
     * intended for dynamic allocation (see http://bit.ly/dynports).
     */
    private int randomPort() {
      return MIN_RANDOM_PORT
        + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
    }

    interface AvailablePortChecker {
      boolean available(int port);
    }
  }
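
  // Illustrative usage sketch (added for exposition, not part of the original class): tests
  // that must bind their own sockets can reserve an ephemeral-range port up front, e.g.:
  //
  //   int port = HBaseTestingUtility.randomFreePort();
  //   conf.setInt("hbase.regionserver.port", port);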
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
      Durability.USE_DEFAULT);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setDurability(durability);
    tableDescriptor.setRegionReplication(regionReplication);
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamily);
    familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
    familyDescriptor.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, tableDescriptor, familyDescriptor,
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[][] columnFamilies, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setDurability(durability);
    tableDescriptor.setRegionReplication(regionReplication);
    ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
    for (int i = 0; i < columnFamilies.length; i++) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamilies[i]);
      familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
      familyDescriptor.setCompressionType(compression);
      hcds[i] = familyDescriptor;
    }
    return createPreSplitLoadTestTable(conf, tableDescriptor, hcds, numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer)
      throws IOException {
    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
      int numRegionsPerServer) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcds,
      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor td, ColumnFamilyDescriptor[] cds,
      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
    for (ColumnFamilyDescriptor cd : cds) {
      if (!td.hasColumnFamily(cd.getName())) {
        builder.setColumnFamily(cd);
      }
    }
    td = builder.build();
    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();

    try {
      // create a table with pre-split regions.
      // The number of splits is set as:
      //    (region servers * regions per region server).
      int numberOfServers = admin.getRegionServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = splitter.split(totalNumberOfRegions);

      admin.createTable(td, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
    } finally {
      admin.close();
      unmanagedConnection.close();
    }
    return totalNumberOfRegions;
  }
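
  // Illustrative usage sketch (added for exposition, not part of the original class): a load
  // test would typically pre-split and then write through a normal client connection; the
  // table name below is a placeholder, e.g.:
  //
  //   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
  //     TableName.valueOf("loadTable"), Bytes.toBytes("cf"),
  //     Compression.Algorithm.NONE, DataBlockEncoding.NONE);
  //   LOG.info("Table created with " + regions + " regions");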
  public static int getMetaRSPort(Connection connection) throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
    }
  }
  /**
   * Due to async racing issue, a region may not be in
   * the online region list of a region server yet, after
   * the assignment znode is deleted and the new assignment
   * is recorded in master.
   */
  public void assertRegionOnServer(
      final RegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }
  /**
   * Check to make sure the region is open on the specified
   * region server, but not on any other one.
   */
  public void assertRegionOnlyOnServer(
      final RegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
              r.getRegionInfo().getRegionId() != hri.getRegionId());
          }
        }
        return; // good, we are happy
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
      BlockCache blockCache) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
  }
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in master.
   */
  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates();
        return "found in transition: " + regionStates.getRegionsInTransition().toString();
      }

      @Override
      public boolean evaluate() throws IOException {
        HMaster master = getMiniHBaseCluster().getMaster();
        if (master == null) return false;
        AssignmentManager am = master.getAssignmentManager();
        if (am == null) return false;
        return !am.hasRegionsInTransition();
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is enabled.
   */
  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.ENABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is disabled.
   */
  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.DISABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().isTableDisabled(tableName);
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is available.
   */
  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableAvailability(tableName);
      }

      @Override
      public boolean evaluate() throws IOException {
        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
        if (tableAvailable) {
          try (Table table = getConnection().getTable(tableName)) {
            TableDescriptor htd = table.getDescriptor();
            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
                .getAllRegionLocations()) {
              Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
                .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
                .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
              for (byte[] family : htd.getColumnFamilyNames()) {
                scan.addFamily(family);
              }
              try (ResultScanner scanner = table.getScanner(scan)) {
                scanner.next();
              }
            }
          }
        }
        return tableAvailable;
      }
    };
  }
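
  // Illustrative usage sketch (added for exposition, not part of the original class): the
  // predicates compose with the generic waitFor(...) helper, e.g.:
  //
  //   TEST_UTIL.waitFor(60000, TEST_UTIL.predicateTableAvailable(tableName));
  //   TEST_UTIL.waitFor(60000, TEST_UTIL.predicateNoRegionsInTransition());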
  /**
   * Wait until no regions in transition.
   * @param timeout How long to wait.
   * @throws IOException if an IO problem is encountered
   */
  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
    waitFor(timeout, predicateNoRegionsInTransition());
  }
  /**
   * Wait until no regions in transition. (time limit 15min)
   * @throws IOException if an IO problem is encountered
   */
  public void waitUntilNoRegionsInTransition() throws IOException {
    waitUntilNoRegionsInTransition(15 * 60000);
  }
  /**
   * Wait until labels are ready in VisibilityLabelsCache.
   * @param timeoutMillis how long to wait, in milliseconds
   * @param labels the labels to wait on
   */
  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {

      @Override
      public boolean evaluate() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return false;
          }
        }
        return true;
      }

      @Override
      public String explainFailure() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return label + " is not available yet";
          }
        }
        return "";
      }
    });
  }
  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
          columnFamilyDescriptorBuilder.setCompressionType(compressionType);
          columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
          columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
          columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
          familyId++;
        }
      }
    }
    return columnFamilyDescriptors;
  }
  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan(row);
    scan.setSmall(true);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
    }
  }
  private boolean isTargetTable(final byte[] inRow, Cell c) {
    String inputRowString = Bytes.toString(inRow);
    int i = inputRowString.indexOf(HConstants.DELIMITER);
    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    int o = outputRowString.indexOf(HConstants.DELIMITER);
    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
  }
  /**
   * Sets up {@link MiniKdc} for testing security.
   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
   */
  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
    Properties conf = MiniKdc.createConf();
    conf.put(MiniKdc.DEBUG, true);
    MiniKdc kdc = null;
    File dir = null;
    // There is time lag between selecting a port and trying to bind with it. It's possible that
    // another service captures the port in between which'll result in BindException.
    boolean bindException;
    int numTries = 0;
    do {
      try {
        bindException = false;
        dir = new File(getDataTestDir("kdc").toUri().getPath());
        kdc = new MiniKdc(conf, dir);
        kdc.start();
      } catch (BindException e) {
        FileUtils.deleteDirectory(dir);  // clean directory
        numTries++;
        if (numTries == 3) {
          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
          throw e;
        }
        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
        bindException = true;
      }
    } while (bindException);
    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
    return kdc;
  }
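
  // Illustrative usage sketch (added for exposition, not part of the original class): a
  // security test can stand up a KDC and then create principals against it (file names
  // and the principal string below are placeholders), e.g.:
  //
  //   File keytab = new File(TEST_UTIL.getDataTestDir("keytab").toUri().getPath());
  //   MiniKdc kdc = TEST_UTIL.setupMiniKdc(keytab);
  //   kdc.createPrincipal(keytab, "hbase/localhost");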
  public int getNumHFiles(final TableName tableName, final byte[] family) {
    int numHFiles = 0;
    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName,
        family);
    }
    return numHFiles;
  }
  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
      final byte[] family) {
    int numHFiles = 0;
    for (Region region : rs.getRegions(tableName)) {
      numHFiles += region.getStore(family).getStorefilesCount();
    }
    return numHFiles;
  }
  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
    assertEquals(ltdFamilies.size(), rtdFamilies.size());
    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
        rtdFamilies.iterator(); it.hasNext();) {
      assertEquals(0,
        ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
    }
  }