/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.MasterRegistry;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/**
 * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
 * functionality. Create an instance and keep it around testing HBase. This class is meant to be
 * your one-stop shop for anything you might need testing. Manages one cluster at a time only.
 * Managed cluster can be an in-process {@link MiniHBaseCluster}, or a deployed cluster of type
 * {@code DistributedHBaseCluster}. Not all methods work with the real cluster. Depends on log4j
 * being on classpath and hbase-site.xml for logging and test-run configuration. It does not set
 * logging levels.
 * <p>In the configuration properties, default values for master-info-port and region-server-port
 * are overridden such that a random port will be assigned (thus avoiding port contention if
 * another local HBase instance is already running).
 * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
 * setting it to true.
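 * <p>A minimal usage sketch (illustrative only; the table and family names below are made up,
 * not part of this utility):
 * <pre>
 *   HBaseTestingUtility util = new HBaseTestingUtility();
 *   util.startMiniCluster();
 *   try {
 *     Table table = util.createTable(TableName.valueOf("test"), "cf");
 *     table.put(new Put(Bytes.toBytes("row")).addColumn(Bytes.toBytes("cf"),
 *       Bytes.toBytes("q"), Bytes.toBytes("value")));
 *   } finally {
 *     util.shutdownMiniCluster();
 *   }
 * </pre>
 */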
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseZKTestingUtility {
  /**
   * System property key to get test directory value. Name is as it is because mini dfs has
   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a
   * property used in mini dfs.
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";
  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  /**
   * The default number of regions per regionserver when creating a pre-split table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;
  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /**
   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility.
   */
  private Path dataTestDirOnTestFS = null;

  private volatile AsyncClusterConnection asyncConnection;

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
  /**
   * Checks to see if a specific port is available.
   *
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }
      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }
    return false;
  }
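  // A small usage sketch (illustrative; the probing loop and starting port below are made up,
  // not part of this utility): scan upward from a base port until a free one is found.
  //
  //   int port = 50000;
  //   while (!HBaseTestingUtility.available(port)) {
  //     port++;
  //   }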
  /**
   * Create all combinations of Bloom filters and compression algorithms for testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }
  /**
   * Create combinations of memstoreTS and tags.
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }
  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }
  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();
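  // A minimal sketch of how these combination lists are typically consumed (assumption: a JUnit 4
  // Parameterized test; the test class shown is illustrative, not part of this utility):
  //
  //   @RunWith(Parameterized.class)
  //   public class TestWithBloomAndCompression {
  //     @Parameterized.Parameters
  //     public static Collection<Object[]> parameters() {
  //       return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //     }
  //
  //     public TestWithBloomAndCompression(Compression.Algorithm algo, BloomType bloom) { ... }
  //   }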
  /**
   * <p>Create an HBaseTestingUtility using a default configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }
  /**
   * <p>Create an HBaseTestingUtility using a given configuration.
   *
   * <p>Initially, all tmp files are written to a local test data directory.
   * Once {@link #startMiniDFSCluster} is called, either directly or via
   * {@link #startMiniCluster()}, tmp data will be written to the DFS directory instead.
   *
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtility(@Nullable Configuration conf) {
    super(conf);

    // a hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, default it to true so that tests get random
    // port assignment unless they explicitly opt out
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }
  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion) r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }
  /**
   * Returns this class's instance of {@link Configuration}. Be careful how you use the returned
   * Configuration since {@link Connection} instances can be shared. The Map of Connections is
   * keyed by the Configuration. If, say, a Connection was being used against a cluster that had
   * been shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be
   * wholesome. Rather than using the returned Configuration directly, it is usually best to make
   * a copy and use that. Do
   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
   * Give it a random name so we can have many concurrent tests running if we need to. It needs
   * to amend the {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases
   * its data dir on. Modifying a System property is not the way to do concurrent instances --
   * another instance could grab the temporary value unintentionally -- but there is nothing we
   * can do about it at the moment; single instance only is how the minidfscluster works.
   *
   * We also create the underlying directory names for hadoop.log.dir,
   * mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the conf, and as a
   * system property for hadoop.tmp.dir (We do not create them!).
   *
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty(
      "hadoop.log.dir",
      testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty(
      "hadoop.tmp.dir",
      testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir(
      "mapreduce.cluster.local.dir",
      testPath, "mapred-local-dir");
    return testPath;
  }
  private void createSubDirAndSystemProperty(
      String propertyName, Path parent, String subDirName) {

    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      // that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " +
        sysValue + " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(
          propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value."
        );
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }
  /**
   * @return Where to write test data on the test filesystem; Returns working directory
   * for the test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }
  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }
    return dataTestDirOnTestFS;
  }
  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
   * to write temporary test data. Call this method after setting up the mini dfs cluster
   * if the test relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }
  /**
   * Sets up a path in test filesystem to be used by tests.
   * Creates a new directory if not already setup.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in "
        + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }
  /**
   * Sets up a new path in test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
    }
    return newDataTestDir;
  }
  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }
  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed child
   * @throws IOException
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }
  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
  /**
   * Start a minidfscluster.
   * This is useful if you want to run datanode on distinct hosts for things
   * like HDFS block location verification.
   * If you start MiniDFSCluster without host names, all instances of the
   * datanodes will have the same host name.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }
  /**
   * Start a minidfscluster.
   * Can only create one.
   * @param servers How many DNs to start.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }
  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class)
      .setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class)
      .setLevel(org.apache.log4j.Level.ERROR);

    TraceUtil.initTracer(conf);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }
  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }
  /**
   * This is used before starting HDFS and map-reduce mini-clusters. Run something like the below
   * to check for the likes of '/tmp' references -- i.e. references outside of the test data dir
   * -- in the conf.
   * <pre>
   * Configuration conf = TEST_UTIL.getConfiguration();
   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
   *   Map.Entry&lt;String, String&gt; e = i.next();
   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
   * }
   * </pre>
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("test.cache.data");
    createDirAndSetProperty("hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
    createDirAndSetProperty("mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());

    // Frustrate yarn's and hdfs's attempts at writing /tmp.
    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
    createDirAndSetProperty("yarn.nodemanager.log-dirs");
    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
    createDirAndSetProperty("dfs.journalnode.edits.dir");
    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
    createDirAndSetProperty("nfs.dump.dir");
    createDirAndSetProperty("java.io.tmpdir");
    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
  }
  /**
   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating
   * new column families. Default to false.
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }
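  // A usage sketch: since the flag is read from a system property, a test run can enable it
  // from the command line, e.g. (illustrative invocation, not part of this utility):
  //   mvn test -Dhbase.tests.new.version.behavior=true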
  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   * This allows specifying this parameter on the command line. If not set as a system property,
   * it falls back to the value in the conf, defaulting to false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }
  /**
   * Enable the short circuit read, unless configured differently.
   * Set both HBase and HDFS settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }
  private String createDirAndSetProperty(final String property) {
    return createDirAndSetProperty(property, property);
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }
  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
   * or does nothing.
   * @throws IOException
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
      boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numRegionServers(numSlaves).numDataNodes(numSlaves).createRootDir(createRootDir)
      .createWALDir(createWALDir).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
      .numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *   overwrite HDFS data node number.
   * @param createRootDir Whether to create a new root or data directory path.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      boolean createRootDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves).createRootDir(createRootDir)
      .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *   overwrite HDFS data node number.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numSlaves)
      .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numRegionServers).numDataNodes(numDataNodes)
      .build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numSlaves Slave node number, for both HBase region server and HDFS data node.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *   overwrite HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).masterClass(masterClass)
      .numRegionServers(numSlaves).rsClass(rsClass)
      .numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts)
      .build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *   overwrite HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass)
      throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).masterClass(masterClass)
      .numRegionServers(numRegionServers).rsClass(rsClass)
      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
      .build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs, and zookeeper.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param numDataNodes Number of datanodes.
   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will
   *   overwrite HDFS data node number.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniCluster(StartMiniClusterOption)} instead.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
      String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
      boolean createRootDir, boolean createWALDir) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).masterClass(masterClass)
      .numRegionServers(numRegionServers).rsClass(rsClass)
      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts)
      .createRootDir(createRootDir).createWALDir(createWALDir)
      .build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
   * @see #startMiniCluster(StartMiniClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }
  /**
   * Start up a minicluster of hbase, dfs and zookeeper all using default options.
   * Option default value can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniCluster(StartMiniClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(StartMiniClusterOption.builder().build());
  }
  /**
   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed.
   * It modifies Configuration. It homes the cluster data directory under a random
   * subdirectory in a directory under System property test.build.data, to be cleaned up on exit.
   * @see #shutdownMiniDFSCluster()
   */
  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
    LOG.info("Starting up minicluster with option: {}", option);

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster(option.getNumZkServers());
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(option);
  }
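  // A minimal sketch of driving startup through the options builder (values are illustrative;
  // the builder methods shown are the ones exercised elsewhere in this class):
  //
  //   StartMiniClusterOption option = StartMiniClusterOption.builder()
  //     .numMasters(2).numRegionServers(3).numDataNodes(3)
  //     .createRootDir(true).createWALDir(false).build();
  //   MiniHBaseCluster cluster = TEST_UTIL.startMiniCluster(option);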
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * This is useful when doing stepped startup of clusters.
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
      throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(option.isCreateRootDir());
    if (option.isCreateWALDir()) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the server wait until this exact number of
    // region servers is connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
    }

    Configuration c = new Configuration(this.conf);
    TraceUtil.initTracer(c);
    this.hbaseCluster =
      new MiniHBaseCluster(c, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
        option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
        option.getRsClass());
    // Populate the master address configuration from mini cluster configuration.
    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
    // Don't leave here till we've done a successful scan of the hbase:meta
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      continue;
    }
    s.close();
    t.close();

    getAdmin(); // create immediately the hbaseAdmin
    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());

    return (MiniHBaseCluster) hbaseCluster;
  }
  /**
   * Starts up mini hbase cluster using default options.
   * Default options can be found in {@link StartMiniClusterOption.Builder}.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
  }
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
      throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numRegionServers).build();
    return startMiniHBaseCluster(option);
  }
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServer should use.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).numRegionServers(numRegionServers).rsPorts(rsPorts).build();
    return startMiniHBaseCluster(option);
  }
  /**
   * Starts up mini hbase cluster.
   * Usually you won't want this. You'll usually want {@link #startMiniCluster()}.
   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
   * @param numMasters Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts Ports that RegionServer should use.
   * @param masterClass The class to use as HMaster, or null for default.
   * @param rsClass The class to use as HRegionServer, or null for default.
   * @param createRootDir Whether to create a new root or data directory path.
   * @param createWALDir Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *   {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartMiniClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
      List<Integer> rsPorts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
      boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(numMasters).masterClass(masterClass)
      .numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
      .createRootDir(createRootDir).createWALDir(createWALDir).build();
    return startMiniHBaseCluster(option);
  }
  /**
   * Starts the hbase cluster up again after shutting it down previously in a
   * test. Use this if you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.restartHBaseCluster(servers, null);
  }

  public void restartHBaseCluster(int servers, List<Integer> ports)
      throws IOException, InterruptedException {
    StartMiniClusterOption option =
      StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
    restartHBaseCluster(option);
    invalidateConnection();
  }

  public void restartHBaseCluster(StartMiniClusterOption option)
      throws IOException, InterruptedException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
      hbaseAdmin = null;
    }
    if (this.asyncConnection != null) {
      this.asyncConnection.close();
      this.asyncConnection = null;
    }
    this.hbaseCluster =
      new MiniHBaseCluster(this.conf, option.getNumMasters(), option.getNumAlwaysStandByMasters(),
        option.getNumRegionServers(), option.getRsPorts(), option.getMasterClass(),
        option.getRsClass());
    // Don't leave here till we've done a successful scan of the hbase:meta
    Connection conn = ConnectionFactory.createConnection(this.conf);
    Table t = conn.getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
    conn.close();
  }
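  // A sketch of the stop/start cycle these methods enable (assumes a minicluster is already up;
  // dfs and zk stay running in between; the server count below is illustrative):
  //
  //   TEST_UTIL.shutdownMiniHBaseCluster();
  //   // ... adjust TEST_UTIL.getConfiguration() as needed ...
  //   TEST_UTIL.restartHBaseCluster(3);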
  /**
   * @return Current mini hbase cluster. Only has something in it after a call
   * to {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public MiniHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
      return (MiniHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(hbaseCluster + " not an instance of " +
      MiniHBaseCluster.class.getName());
  }
  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    shutdownMiniDFSCluster();
    shutdownMiniZKCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
  /**
   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case command is unsuccessful
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
  /**
   * Abruptly shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException throws in case command is unsuccessful
   */
  public void killMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      getMiniHBaseCluster().killAll();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
  // close hbase admin, close current connection and reset MIN MAX configs for RS.
  private void cleanup() throws IOException {
    closeConnection();
    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  }
  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code>
   * is true, a new root directory path is fetched irrespective of whether it has been fetched
   * before or not. If false, previous path is used.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }
  /**
   * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)}
   * except that <code>create</code> flag is false.
   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir in user home directory. Also creates hbase
   * version file. Normally you won't make use of this method. Root hbasedir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operation.
   * @param create This flag decides whether to get a new
   * root or data directory path or not, if it has been fetched already.
   * Note : Directory will be made irrespective of whether path has been fetched or not.
   * If directory already exists, it will be overwritten
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
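
  // Illustrative sketch: manual root dir creation is only needed outside of mini cluster
  // startup, e.g. when assembling an hbase filesystem layout by hand on the test FS.
  // The "util" receiver name is an assumption for the example.
  //
  //   Path rootDir = util.createRootDir();        // cached path, writes the version file
  //   Path freshRoot = util.createRootDir(true);  // forces a brand new root dir path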

  /**
   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)}
   * except that <code>create</code> flag is false.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Creates a hbase walDir in the user's home directory.
   * Normally you won't make use of this method. Root hbaseWALDir
   * is created for you as part of mini cluster startup. You'd only use this
   * method if you were doing manual operation.
   *
   * @return Fully qualified path to hbase WAL root dir
   * @throws IOException
   */
  public Path createWALRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path walDir = getNewDataTestDirOnTestFS();
    CommonFSUtils.setWALRootDir(this.conf, walDir);
    fs.mkdirs(walDir);
    return walDir;
  }

  private void setHBaseFsTmpDir() throws IOException {
    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
    if (hbaseFsTmpDirInString == null) {
      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
    } else {
      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
    }
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches of the given table in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
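
  // Illustrative sketch: tests often combine flush and compaction to force memstore
  // contents into a predictable set of store files before making assertions.
  // The "util" receiver name is an assumption for the example.
  //
  //   util.flush(tableName);          // memstore -> new store files
  //   util.compact(tableName, true);  // major-compact the table's regions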

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String family)
      throws IOException {
    return createTable(tableName, new String[] { family });
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String[] families)
      throws IOException {
    List<byte[]> fams = new ArrayList<>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family)
      throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  /**
   * Create a table with multiple regions.
   * @param tableName
   * @param family
   * @param numRegions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
      throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);

    return createTable(tableName, new byte[][] { family }, splitKeys);
  }
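
  // Illustrative sketch: asking for 10 regions yields split points computed by
  // Bytes.split over ["aaaaa", "zzzzz"], with the first and last regions covering the
  // key space before "aaaaa" and from "zzzzz" onward. Table/family names are arbitrary.
  //
  //   Table t = util.createMultiRegionTable(TableName.valueOf("tbl"),
  //     Bytes.toBytes("cf"), 10);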

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families)
      throws IOException {
    return createTable(tableName, families, (byte[][]) null);
  }

  /**
   * Create a table with multiple regions.
   * @param tableName
   * @param families
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param splitKeys
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
      throws IOException {
    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param families the families
   * @param splitKeys the splitkeys
   * @param replicaCount the region replica count
   * @return A Table instance for the created table.
   * @throws IOException throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      int replicaCount) throws IOException {
    return createTable(tableName, families, splitKeys, replicaCount,
      new Configuration(getConfiguration()));
  }
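
  // Illustrative sketch: a table with region replicas for TIMELINE-consistency tests.
  // Table and family names here are arbitrary examples.
  //
  //   Table t = util.createTable(TableName.valueOf("replicated"),
  //     new byte[][] { Bytes.toBytes("cf") }, KEYS_FOR_HBA_CREATE_TABLE, 3);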

  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);

    getAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta we
    // should wait until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table.
   * @param htd table descriptor
   * @param families array of column families
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
      throws IOException {
    return createTable(htd, families, null, c);
  }

  /**
   * Create a table.
   * @param htd table descriptor
   * @param families array of column families
   * @param splitKeys array of split keys
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      Configuration c) throws IOException {
    // Disable blooms (they are on by default as of 0.95) but we disable them here because
    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
    // on is interfering.
    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
  }

  /**
   * Create a table.
   * @param htd table descriptor
   * @param families array of column families
   * @param splitKeys array of split keys
   * @param type Bloom type
   * @param blockSize block size
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
      BloomType type, int blockSize, Configuration c) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setBloomFilterType(type)
        .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        cfdb.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfdb.build());
    }
    TableDescriptor td = builder.build();
    if (splitKeys != null) {
      getAdmin().createTable(td, splitKeys);
    } else {
      getAdmin().createTable(td);
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(td.getTableName());
    return getConnection().getTable(td.getTableName());
  }

  /**
   * Create a table.
   * @param htd table descriptor
   * @param splitRows array of split keys
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableDescriptor htd, byte[][] splitRows)
      throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    if (isNewVersionBehaviorEnabled()) {
      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
          .setNewVersionBehavior(true).build());
      }
    }
    if (splitRows != null) {
      getAdmin().createTable(builder.build(), splitRows);
    } else {
      getAdmin().createTable(builder.build());
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return getConnection().getTable(htd.getTableName());
  }

  /**
   * Create a table.
   * @param tableName the table name
   * @param families the families
   * @param splitKeys the split keys
   * @param replicaCount the replica count
   * @param c Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
      int replicaCount, final Configuration c) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setRegionReplication(replicaCount);
    return createTable(htd, families, splitKeys, c);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family, int numVersions)
      throws IOException {
    return createTable(tableName, new byte[][] { family }, numVersions);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, int numVersions)
      throws IOException {
    return createTable(tableName, families, numVersions, (byte[][]) null);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @param splitKeys
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, int numVersions,
      byte[][] splitKeys) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    if (splitKeys != null) {
      getAdmin().createTable(tableDescriptor, splitKeys);
    } else {
      getAdmin().createTable(tableDescriptor);
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table with multiple regions.
   * @param tableName
   * @param families
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
      throws IOException {
    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @param blockSize
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  public Table createTable(TableName tableName, byte[][] families,
      int numVersions, int blockSize, String cpName) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    if (cpName != null) {
      tableDescriptor.setCoprocessor(cpName);
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param families
   * @param numVersions
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families,
      int[] numVersions) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(numVersions[i]);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
      i++;
    }
    getAdmin().createTable(tableDescriptor);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table.
   * @param tableName
   * @param family
   * @param splitRows
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
    if (isNewVersionBehaviorEnabled()) {
      familyDescriptor.setNewVersionBehavior(true);
    }
    tableDescriptor.setColumnFamily(familyDescriptor);
    getAdmin().createTable(tableDescriptor, splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table with multiple regions.
   * @param tableName
   * @param family
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
  }

  /**
   * Modify a table, synchronous.
   * @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
   *   {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
   * @see Admin#modifyTable(TableDescriptor)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-22002">HBASE-22002</a>
   */
  @Deprecated
  public static void modifyTableSync(Admin admin, TableDescriptor desc)
      throws IOException, InterruptedException {
    admin.modifyTable(desc);
  }

  /**
   * Set the number of Region replicas.
   */
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
      throws IOException, InterruptedException {
    admin.disableTable(table);
    HTableDescriptor desc = new HTableDescriptor(admin.getDescriptor(table));
    desc.setRegionReplication(replicaCount);
    admin.modifyTable(desc);
    admin.enableTable(table);
  }
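
  // Illustrative sketch: setReplicas disables and re-enables the table around the
  // descriptor change, so callers should expect the table to be briefly offline.
  //
  //   HBaseTestingUtility.setReplicas(util.getAdmin(), tableName, 3);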

  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getAdmin().deleteTable(tableName);
  }

  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTableIfAny(TableName tableName) throws IOException {
    try {
      deleteTable(tableName);
    } catch (TableNotFoundException e) {
      // ignore
    }
  }
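
  // Illustrative sketch: deleteTableIfAny is handy in per-test cleanup where the test
  // may or may not have created the table. The @After hook and name are an example only.
  //
  //   @After
  //   public void cleanupTables() throws IOException {
  //     util.deleteTableIfAny(TableName.valueOf("scratch"));
  //   }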

  // ==========================================================================
  // Canned table and table descriptor creation
  // TODO replace HBaseTestCase

  public final static byte [] fam1 = Bytes.toBytes("colfamily11");
  public final static byte [] fam2 = Bytes.toBytes("colfamily21");
  public final static byte [] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = {fam1, fam2, fam3};
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);

  public TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(
      final String name) {
    return createModifyableTableDescriptor(TableName.valueOf(name),
      HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  public HTableDescriptor createTableDescriptor(final TableName name,
      final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(name);
    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return new HTableDescriptor(tableDescriptor);
  }

  public TableDescriptorBuilder.ModifyableTableDescriptor createModifyableTableDescriptor(
      final TableName name, final int minVersions, final int versions, final int ttl,
      KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(name);
    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(cfName)
          .setMinVersions(minVersions)
          .setMaxVersions(versions)
          .setKeepDeletedCells(keepDeleted)
          .setBlockCacheEnabled(false)
          .setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return tableDescriptor;
  }

  /**
   * Create a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Column descriptor.
   */
  public HTableDescriptor createTableDescriptor(final TableName name) {
    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
  }

  public HTableDescriptor createTableDescriptor(final TableName tableName,
      byte[] family) {
    return createTableDescriptor(tableName, new byte[][] { family }, 1);
  }

  public HTableDescriptor createTableDescriptor(final TableName tableName,
      byte[][] families, int maxVersions) {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);

    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family)
          .setMaxVersions(maxVersions);
      if (isNewVersionBehaviorEnabled()) {
        familyDescriptor.setNewVersionBehavior(true);
      }
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    return new HTableDescriptor(tableDescriptor);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param desc a table descriptor indicating which table the region belongs to
   * @param startKey the start boundary of the region
   * @param endKey the end boundary of the region
   * @return a region that writes to local dir for testing
   */
  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
      throws IOException {
    RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName()).setStartKey(startKey)
      .setEndKey(endKey).build();
    return createLocalHRegion(hri, desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
   */
  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
  }

  /**
   * Create an HRegion that writes to the local tmp dirs with specified wal
   * @param info regioninfo
   * @param desc table descriptor
   * @param wal wal for this region.
   * @return created hregion
   * @throws IOException
   */
  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc, WAL wal)
      throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
  }

  /**
   * @return A region on which you must call
   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
   * @throws IOException
   */
  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, isReadOnly,
      durability, wal, null, families);
  }

  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
      byte[] stopKey, boolean isReadOnly, Durability durability, WAL wal,
      boolean[] compactedMemStore, byte[]... families) throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        familyDescriptor.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        familyDescriptor.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
      i++;
      // Set default to be three versions.
      familyDescriptor.setMaxVersions(Integer.MAX_VALUE);
      tableDescriptor.setColumnFamily(familyDescriptor);
    }
    tableDescriptor.setDurability(durability);
    RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
      .setStartKey(startKey).setEndKey(stopKey).build();
    return createLocalHRegion(info, tableDescriptor, wal);
  }

  // ==========================================================================

  /**
   * Provide an existing table name to truncate.
   * Scans the table and issues a delete for each row read.
   * @param tableName existing table
   * @return HTable to that new table
   * @throws IOException
   */
  public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }

  /**
   * Truncate a table using the admin command.
   * Effectively disables, deletes, and recreates the table.
   * @param tableName table which must exist.
   * @param preserveRegions keep the existing split points
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
      throws IOException {
    Admin admin = getAdmin();
    if (!admin.isTableDisabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.truncateTable(tableName, preserveRegions);
    return getConnection().getTable(tableName);
  }

  /**
   * Truncate a table using the admin command.
   * Effectively disables, deletes, and recreates the table.
   * For previous behavior of issuing row deletes, see
   * deleteTableData.
   * Expressly does not preserve regions of existing table.
   * @param tableName table which must exist.
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName) throws IOException {
    return truncateTable(tableName, false);
  }
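
  // Illustrative sketch: truncateTable(tableName, true) keeps the split points, which is
  // usually what multi-region tests want between test methods; the one-argument overload
  // collapses the table back to a single region.
  //
  //   util.truncateTable(tableName, true);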

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }

  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Family
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }

  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of Families to load
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }

  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }

  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param t Table
   * @param f Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value,
      boolean writeToWAL) throws IOException {
    List<Put> puts = new ArrayList<>();
    for (byte[] row : HBaseTestingUtility.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        byte[] value1 = value != null ? value : row;
        put.addColumn(f[i], f[i], value1);
      }
      puts.add(put);
    }
    t.put(puts);
    return puts.size();
  }
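
  // Illustrative sketch: loadTable writes one row per entry in ROWS ('aaa'..'zzz'), so
  // the returned count is 26^3 = 17576 and countRows(t) should then agree.
  //
  //   int loaded = util.loadTable(t, Bytes.toBytes("cf"));
  //   assertEquals(loaded, HBaseTestingUtility.countRows(t));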

  /** A tracker for tracking and validating table rows
   * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])}
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - 'a';
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
     * all other rows none
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (Bytes.compareTo(new byte[] {b1, b2, b3}, startRow) >= 0
                && Bytes.compareTo(new byte[] {b1, b2, b3}, stopRow) < 0) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] {b1, b2, b3}, StandardCharsets.UTF_8);
              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " " +
                "instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
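
  // Illustrative sketch: a bounded scan over a loadTable()-populated table can be
  // validated with SeenRowTracker; rows outside [startRow, stopRow) must not be seen.
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  //   Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("ccc"));
  //   for (Result r : t.getScanner(scan)) {
  //     tracker.addRow(r.getRow());
  //   }
  //   tracker.validate(); // throws RuntimeException if any row count is off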

  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }

  public int loadRegion(final Region r, final byte[] f) throws IOException {
    return loadRegion((HRegion) r, f);
  }

  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param r Region
   * @param f Family
   * @param flush flush the cache if true
   * @return Count of rows loaded.
   * @throws IOException
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
      throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
          if (r.getWAL() == null) {
            put.setDurability(Durability.SKIP_WAL);
          }
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flush(true);
      }
    }
    return rowCount;
  }

  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.addColumn(f, null, data);
      t.put(put);
    }
  }

  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
      throws IOException {
    Random r = new Random();
    byte[] row = new byte[rowSize];
    for (int i = 0; i < totalRows; i++) {
      r.nextBytes(row);
      Put put = new Put(row);
      put.addColumn(f, new byte[]{0}, new byte[]{0});
      t.put(put);
    }
  }

  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
      int replicaId)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Get get = new Get(data);
      get.setReplicaId(replicaId);
      get.setConsistency(Consistency.TIMELINE);
      Result result = table.get(get);
      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength()));
    }
  }

  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
      throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow);
  }

  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
      throws IOException {
    verifyNumericRows(region, f, startRow, endRow, true);
  }

  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
      final boolean present) throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
  }

  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
      final boolean present) throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Result result = region.get(new Get(data));

      boolean hasResult = result != null && !result.isEmpty();
      assertEquals(failMsg + result, present, hasResult);
      if (!present) continue;

      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength()));
    }
  }

  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
      throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Delete delete = new Delete(data);
      delete.addFamily(f);
      t.delete(delete);
    }
  }

  /**
   * Return the number of rows in the given table.
   * @param table to count rows
   * @return count of rows
   */
  public static int countRows(final Table table) throws IOException {
    return countRows(table, new Scan());
  }

  public static int countRows(final Table table, final Scan scan) throws IOException {
    try (ResultScanner results = table.getScanner(scan)) {
      int count = 0;
      while (results.next() != null) {
        count++;
      }
      return count;
    }
  }

  public int countRows(final Table table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    return countRows(table, scan);
  }

  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    try {
      return countRows(table);
    } finally {
      table.close();
    }
  }

  public int countRows(final Region region) throws IOException {
    return countRows(region, new Scan());
  }

  public int countRows(final Region region, final Scan scan) throws IOException {
    InternalScanner scanner = region.getScanner(scan);
    try {
      return countRows(scanner);
    } finally {
      scanner.close();
    }
  }

  public int countRows(final InternalScanner scanner) throws IOException {
    int scannedCount = 0;
    List<Cell> results = new ArrayList<>();
    boolean hasMore = true;
    while (hasMore) {
      hasMore = scanner.next(results);
      scannedCount += results.size();
      results.clear();
    }
    return scannedCount;
  }

  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final Table table) throws Exception {
    Scan scan = new Scan();
    ResultScanner results = table.getScanner(scan);
    MessageDigest digest = MessageDigest.getInstance("MD5");
    for (Result res : results) {
      digest.update(res.getRow());
    }
    results.close();
    return digest.toString();
  }
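
  // Illustrative sketch of before/after comparison around some cluster perturbation:
  //
  //   String before = util.checksumRows(t);
  //   // ... restart region servers, flush, etc ...
  //   String after = util.checksumRows(t);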

  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB

  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }

  public static final byte[][] KEYS = {
    HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
  };

  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
    Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
    Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
    Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
    Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
    Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
    Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
    Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
  };

  /**
   * Create rows in hbase:meta for regions of the specified table with the specified
   * start keys. The first startKey should be a 0 length byte array if you
   * want to form a proper range of regions.
   * @param conf
   * @param htd
   * @param startKeys
   * @return list of region info for regions added to meta
   * @throws IOException
   */
  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
      final TableDescriptor htd, byte [][] startKeys)
      throws IOException {
    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
    MetaTableAccessor
      .updateTableState(getConnection(), htd.getTableName(), TableState.State.ENABLED);
    // add custom ones
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
        .setStartKey(startKeys[i])
        .setEndKey(startKeys[j])
        .build();
      MetaTableAccessor.addRegionToMeta(getConnection(), hri);
      newRegions.add(hri);
    }

    meta.close();
    return newRegions;
  }

  /**
   * Create an unmanaged WAL. Be sure to close it when you're through.
   */
  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
      throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless I pass along via the conf.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
  }

  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd) throws IOException {
    return createRegionAndWAL(info, rootDir, conf, htd, true);
  }

  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
      throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setBlockCache(blockCache);
    region.initialize();
    return region;
  }

  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
      throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setMobFileCache(mobFileCache);
    region.initialize();
    return region;
  }

  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
      final Configuration conf, final TableDescriptor htd, boolean initialize)
      throws IOException {
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    WAL wal = createWal(conf, rootDir, info);
    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
  }
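
  // Illustrative sketch: regions created this way own their WAL, so tests should release
  // both through closeRegionAndWAL rather than closing the region alone.
  //
  //   HRegion region = HBaseTestingUtility.createRegionAndWAL(info,
  //     util.getDataTestDir(), util.getConfiguration(), htd);
  //   try {
  //     // exercise the region
  //   } finally {
  //     HBaseTestingUtility.closeRegionAndWAL(region);
  //   }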

  /**
   * Returns all rows from the hbase:meta table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaTableAccessor class
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all rows from the hbase:meta table for a given user table
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
    // TODO: Redo using MetaTableAccessor.
    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
      if (info == null) {
        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
        // TODO figure out what to do for this new hosed case.
        continue;
      }

      if (info.getTable().equals(tableName)) {
        LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
        rows.add(result.getRow());
      }
    }
    s.close();
    t.close();
    return rows;
  }

  /**
   * Returns all regions of the specified table
   *
   * @param tableName the table name
   * @return all regions of the specified table
   * @throws IOException when getting the regions fails.
   */
  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
    try (Admin admin = getConnection().getAdmin()) {
      return admin.getRegions(tableName);
    }
  }

  /**
   * Find any other region server which is different from the one identified by parameter
   * @param rs
   * @return another region server
   */
  public HRegionServer getOtherRegionServer(HRegionServer rs) {
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      if (!(rst.getRegionServer() == rs)) {
        return rst.getRegionServer();
      }
    }
    return null;
  }

  /**
   * Tool to get the reference to the region server object that holds the
   * region of the specified user table.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   * @throws IOException
   * @throws InterruptedException
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
      throws IOException, InterruptedException {
    List<RegionInfo> regions = getRegions(tableName);
    if (regions == null || regions.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + regions.size() + " regions for table " +
      tableName);

    byte[] firstRegionName = regions.stream()
      .filter(r -> !r.isOffline())
      .map(RegionInfo::getRegionName)
      .findFirst()
      .orElseThrow(() -> new IOException("online regions not found in table " + tableName));

    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. Region may not be online yet. Sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }

  /**
   * Starts a <code>MiniMRCluster</code> with a default number of
   * <code>TaskTracker</code>'s.
   *
   * @throws IOException When starting the cluster fails.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
    conf.setIfUnset(
      "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
      "99.0");
    startMiniMapReduceCluster(2);
    return mrCluster;
  }

  /**
   * Tasktracker has a bug where changing the hadoop.log.dir system property
   * will not change its internal static LOG_DIR variable.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = Field.class.getDeclaredField("modifiers");
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException e) {
      throw new RuntimeException(e);
    } catch (NoSuchFieldException e) {
      // TODO Auto-generated catch block
      throw new RuntimeException(e);
    } catch (IllegalArgumentException e) {
      throw new RuntimeException(e);
    } catch (IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
   * filesystem.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // we up the VM usable so that processes don't get killed.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
    // this avoids the problem by disabling speculative task execution in tests.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Allow the user to override FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    jobConf.set("mapreduce.cluster.local.dir",
      conf.get("mapreduce.cluster.local.dir")); // Hadoop MiniMR overwrites this while it should not
    LOG.info("Mini mapreduce cluster started");

    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
    // necessary config properties here. YARN-129 required adding a few properties.
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // this for mrv2 support; mr1 ignores this
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
    String mrJobHistoryWebappAddress =
      jobConf.get("mapreduce.jobhistory.webapp.address");
    if (mrJobHistoryWebappAddress != null) {
      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
    }
    String yarnRMWebappAddress =
      jobConf.get("yarn.resourcemanager.webapp.address");
    if (yarnRMWebappAddress != null) {
      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
    }
  }

  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    if (mrCluster != null) {
      LOG.info("Stopping mini mapreduce cluster...");
      mrCluster.shutdown();
      mrCluster = null;
      LOG.info("Mini mapreduce cluster stopped");
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapreduce.jobtracker.address", "local");
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   */
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   * This version is used by TestTokenAuthentication
   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
      throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }

  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   * This version is used by TestOpenRegionHandler
   */
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }

  /**
   * Switches the logger for the given class to DEBUG level.
   * @param clazz The class for which to switch to debug logging.
   * @deprecated In 2.3.0, will be removed in 4.0.0. Only support changing log level on log4j now as
   *   HBase only uses log4j. You should do this on your own, as once you know which log framework
   *   you are using, setting the log level to debug is very easy.
   */
  @Deprecated
  public void enableDebug(Class<?> clazz) {
    Log4jUtils.enableDebug(clazz);
  }

  /**
   * Expire the Master's session
   * @throws Exception
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }

  /**
   * Expire a region server's session
   * @param index which RS
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }

  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spawned master
    // this.hbaseCluster shares this configuration too
    decrementMinRegionServerCount(getConfiguration());

    // each master thread keeps a copy of configuration
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }

  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
        Math.max(currentCount - 1, 1));
    }
  }

  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }

  /**
   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
   * http://hbase.apache.org/book.html#trouble.zookeeper
   * There are issues when doing this:
   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
   *
   * @param nodeZK - the ZK watcher to expire
   * @param checkStatus - true to check if we can create a Table with the
   *                    current configuration.
   */
  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
      throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    // so we create a first watcher to be sure that the
    // event was sent. We expect that if our watcher receives the event
    // other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    // we receive all the events, so don't have to capture the event, just
    // closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
      1000, EmptyWatcher.instance, sessionID, password);

    // ensure that we have connection to the server before closing down, otherwise
    // the close session event will be eaten out before we start CONNECTING state
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
        && System.currentTimeMillis() - start < 1000) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      getConnection().getTable(TableName.META_TABLE_NAME).close();
    }
  }
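
  // Illustrative sketch: master failover tests typically expire the active master's
  // ZK session and then wait for a new master to become active.
  //
  //   util.expireMasterSession();
  //   util.getMiniHBaseCluster().waitForActiveAndReadyMaster();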

  /**
   * Get the Mini HBase cluster.
   *
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public MiniHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }

  /**
   * Returns the HBaseCluster instance.
   * <p>Returned object can be any of the subclasses of HBaseCluster, and the
   * tests referring this should not assume that the cluster is a mini cluster or a
   * distributed one. If the test only works on a mini cluster, then specific
   * method {@link #getMiniHBaseCluster()} can be used instead w/o the
   * need to type-cast.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
    return hbaseCluster;
  }

  private void initConnection() throws IOException {
    User user = UserProvider.instantiate(conf).getCurrent();
    this.asyncConnection = ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
  }

  /**
   * Resets the connections so that the next time getConnection() is called, a new connection is
   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
   * the connection is not valid anymore.
   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
   * written, not all start() stop() calls go through this class. Most tests directly operate on
   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
   */
  public void invalidateConnection() throws IOException {
    closeConnection();
    // Update the master addresses if they changed.
    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
      masterConfigBefore, masterConfAfter);
    conf.set(HConstants.MASTER_ADDRS_KEY,
      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
  }
  /**
   * Get a Connection to the cluster. Not thread-safe (This class needs a lot of work to make it
   * thread-safe).
   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
   */
  public Connection getConnection() throws IOException {
    if (this.asyncConnection == null) {
      initConnection();
    }
    return this.asyncConnection.toConnection();
  }
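  /*
   * Usage sketch (illustrative comment only; the table name is hypothetical): the shared
   * connection is owned by this utility, so callers close tables but never the connection:
   *
   *   try (Table t = util.getConnection().getTable(TableName.valueOf("exampleTable"))) {
   *     t.put(new Put(Bytes.toBytes("row1"))
   *       .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
   *   }
   */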
  public AsyncClusterConnection getAsyncConnection() throws IOException {
    if (this.asyncConnection == null) {
      initConnection();
    }
    return this.asyncConnection;
  }
  public void closeConnection() throws IOException {
    Closeables.close(hbaseAdmin, true);
    Closeables.close(asyncConnection, true);
    this.hbaseAdmin = null;
    this.asyncConnection = null;
  }
  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users.
   * Closing it has no effect, it will be closed automatically when the cluster shuts down.
   */
  public synchronized Admin getAdmin() throws IOException {
    if (hbaseAdmin == null) {
      this.hbaseAdmin = getConnection().getAdmin();
    }
    return hbaseAdmin;
  }

  private Admin hbaseAdmin = null;
  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
   */
  public Hbck getHbck() throws IOException {
    return getConnection().getHbck();
  }
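  /*
   * Usage sketch (illustrative comment only): unlike the shared Admin above, the Hbck handle
   * must be closed by the caller, so try-with-resources is the natural shape:
   *
   *   try (Hbck hbck = util.getHbck()) {
   *     // drive repair/fencing operations against the mini cluster master
   *   }
   */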
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(String regionName) throws IOException {
    unassignRegion(Bytes.toBytes(regionName));
  }
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(byte[] regionName) throws IOException {
    getAdmin().unassign(regionName, true);
  }
  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   */
  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
    unassignRegionByRow(Bytes.toBytes(row), table);
  }
  /**
   * Closes the region containing the given row.
   * @param row The row to find the containing region.
   * @param table The table to find the region.
   * @throws IOException
   */
  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    unassignRegion(hrl.getRegion().getRegionName());
  }
  /**
   * Retrieves a splittable region randomly from tableName
   * @param tableName name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within limit of maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      // There are chances that before we get the region for the table from an RS the region may
      // be going for CLOSE. This may be because online schema change is enabled.
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (attempted.contains(idx)) {
          continue;
        }
        try {
          regions.get(idx).checkSplit();
          return regions.get(idx);
        } catch (Exception ex) {
          LOG.warn("Caught exception", ex);
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }
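  /*
   * Usage sketch (illustrative comment only; the table name is hypothetical): pick a
   * splittable region with a bounded number of attempts, then split its table via the
   * shared Admin:
   *
   *   HRegion region = util.getSplittableRegion(TableName.valueOf("exampleTable"), 10);
   *   if (region != null) {
   *     util.getAdmin().split(region.getRegionInfo().getTable());
   *   }
   */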
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }
  /**
   * Set the MiniDFSCluster
   * @param cluster cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   * it is set.
   * @throws IllegalStateException if the passed cluster is up when it is required to be down
   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
   */
  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
      throws IllegalStateException, IOException {
    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
    setFs();
  }
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
  /**
   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
   * (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableAvailable(TableName table)
      throws InterruptedException, IOException {
    waitTableAvailable(table.getName(), 30000);
  }

  public void waitTableAvailable(TableName table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(table));
  }
  /**
   * Wait until all regions in a table have been assigned
   * @param table Table to wait on.
   * @param timeoutMillis Timeout.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
  }
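  /*
   * Usage sketch (illustrative comment only; the table name is hypothetical): a typical test
   * creates a table and then blocks until every region is online before asserting:
   *
   *   util.createTable(TableName.valueOf("exampleTable"), Bytes.toBytes("f"));
   *   util.waitTableAvailable(TableName.valueOf("exampleTable"), 30000);
   */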
  public String explainTableAvailability(TableName tableName) throws IOException {
    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
    if (getHBaseCluster().getMaster().isAlive()) {
      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().getRegionAssignments();
      final List<Pair<RegionInfo, ServerName>> metaLocations =
        MetaTableAccessor.getTableRegionsAndLocations(asyncConnection.toConnection(), tableName);
      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
        RegionInfo hri = metaLocation.getFirst();
        ServerName sn = metaLocation.getSecond();
        if (!assignments.containsKey(hri)) {
          msg += ", region " + hri
            + " not assigned, but found in meta, it is expected to be on " + sn;
        } else if (sn == null) {
          msg += ", region " + hri + " assigned, but has no server in meta";
        } else if (!sn.equals(assignments.get(hri))) {
          msg += ", region " + hri + " assigned, but has different servers in meta and AM (" +
            sn + " <> " + assignments.get(hri) + ")";
        }
      }
    }
    return msg;
  }
  public String explainTableState(final TableName table, TableState.State state)
      throws IOException {
    TableState tableState = MetaTableAccessor.getTableState(asyncConnection.toConnection(), table);
    if (tableState == null) {
      return "TableState in META: No table state in META for table " + table +
        ", last state in meta (including deleted) is " + findLastTableState(table);
    } else if (!tableState.inStates(state)) {
      return "TableState in META: Not " + state + " state, but " + tableState;
    } else {
      return "TableState in META: OK";
    }
  }
  @Nullable
  public TableState findLastTableState(final TableName table) throws IOException {
    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        if (!Arrays.equals(r.getRow(), table.getName())) {
          return false;
        }
        TableState state = CatalogFamilyFormat.getTableState(r);
        if (state != null) {
          lastTableState.set(state);
        }
        return true;
      }
    };
    MetaTableAccessor.scanMeta(asyncConnection.toConnection(), null, null,
      ClientMetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
    return lastTableState.get();
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned. Will timeout after default period (30 seconds).
   * Tolerates nonexistent table.
   * @param table the table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException if an IO problem is encountered
   */
  public void waitTableEnabled(TableName table)
      throws InterruptedException, IOException {
    waitTableEnabled(table, 30000);
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned.
   * @see #waitTableEnabled(TableName, long)
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
  }
  public void waitTableEnabled(TableName table, long timeoutMillis)
      throws IOException {
    waitFor(timeoutMillis, predicateTableEnabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'.
   * Will timeout after default period (30 seconds).
   * @param table Table to wait on.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableDisabled(byte[] table)
      throws InterruptedException, IOException {
    waitTableDisabled(table, 30000);
  }
  public void waitTableDisabled(TableName table, long millisTimeout)
      throws InterruptedException, IOException {
    waitFor(millisTimeout, predicateTableDisabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'.
   * @param table Table to wait on.
   * @param timeoutMillis Time to wait on it being marked disabled.
   * @throws InterruptedException
   * @throws IOException
   */
  public void waitTableDisabled(byte[] table, long timeoutMillis)
      throws InterruptedException, IOException {
    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
  }
  /**
   * Make sure that at least the specified number of region servers
   * are running.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = false;
    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
  /**
   * Make sure that at least the specified number of region servers
   * are running. We don't count the ones that are currently stopping or are
   * stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   * @throws IOException
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
        getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping: " + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
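  /*
   * Usage sketch (illustrative comment only): fault-tolerance tests typically kill a region
   * server and then restore cluster capacity before continuing:
   *
   *   util.getMiniHBaseCluster().stopRegionServer(0);
   *   util.ensureSomeNonStoppedRegionServersAvailable(3); // restart servers until 3 are live
   */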
  /**
   * This method clones the passed <code>c</code> configuration setting a new
   * user into the clone. Use it to get new instances of FileSystem. Only
   * works for DistributedFileSystem w/o Kerberos.
   * @param c Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new configuration instance with a different user set into it.
   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
      final String differentiatingSuffix)
      throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
      return User.getCurrent();
    }
    // Else distributed filesystem. Make a new instance per daemon. Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username,
      new String[]{"supergroup"});
    return user;
  }
  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
      throws IOException {
    NavigableSet<String> online = new TreeSet<>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (RegionInfo region :
            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      }
    }
    for (MasterThread mt : cluster.getLiveMasterThreads()) {
      try {
        for (RegionInfo region :
            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      } catch (ServerNotRunningYetException e) {
        // That's fine.
      }
    }
    return online;
  }
  /**
   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append it's hard-coded to 5 and
   * makes tests linger. Here is the exception you'll see:
   * <pre>
   * 2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/wal.1276627923013 block
   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
   * blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683
   * failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
   * </pre>
   * @param stream A DFSClient.DFSOutputStream.
   * @param max the new maximum recovery error count
   * @throws NoSuchFieldException
   * @throws SecurityException
   * @throws IllegalAccessException
   * @throws IllegalArgumentException
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
  /**
   * Uses the assignment manager directly to assign the region, and waits until the specified
   * region has completed assignment.
   * @return true if the region is assigned, false otherwise.
   */
  public boolean assignRegion(final RegionInfo regionInfo)
      throws IOException, InterruptedException {
    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
    am.assign(regionInfo);
    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
  }
  /**
   * Move region to destination server and wait till region is completely moved and online.
   * @param destRegion region to move
   * @param destServer destination server of the region
   * @throws InterruptedException
   * @throws IOException
   */
  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
      throws InterruptedException, IOException {
    HMaster master = getMiniHBaseCluster().getMaster();
    // TODO: Here we start the move. The move can take a while.
    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
    while (true) {
      ServerName serverName = master.getAssignmentManager().getRegionStates()
        .getRegionServerOfRegion(destRegion);
      if (serverName != null && serverName.equals(destServer)) {
        assertRegionOnServer(destRegion, serverName, 2000);
        break;
      }
      Thread.sleep(10);
    }
  }
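  /*
   * Usage sketch (illustrative comment only): move a table's first region onto a specific
   * server, e.g. to set up a locality or crash scenario ("tn" is a TableName):
   *
   *   RegionInfo hri = util.getAdmin().getRegions(tn).get(0);
   *   ServerName target = util.getMiniHBaseCluster().getRegionServer(1).getServerName();
   *   util.moveRegionAndWait(hri, target);
   */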
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, up to a configurable timeout value (default is 60 seconds).
   * This means all regions have been deployed,
   * master has been informed and updated hbase:meta with the regions deployed
   * server.
   * @param tableName the table name
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName,
      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
  }
  /**
   * Wait until all system table's regions get assigned
   * @throws IOException
   */
  public void waitUntilAllSystemRegionsAssigned() throws IOException {
    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
  }
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
   * master has been informed and updated hbase:meta with the regions deployed
   * server.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   * @throws IOException
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    if (!TableName.isMetaTableName(tableName)) {
      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
          timeout + "ms");
        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
          @Override
          public String explainFailure() throws IOException {
            return explainTableAvailability(tableName);
          }

          @Override
          public boolean evaluate() throws IOException {
            Scan scan = new Scan();
            scan.addFamily(HConstants.CATALOG_FAMILY);
            boolean tableFound = false;
            try (ResultScanner s = meta.getScanner(scan)) {
              for (Result r; (r = s.next()) != null;) {
                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
                RegionInfo info = RegionInfo.parseFromOrNull(b);
                if (info != null && info.getTable().equals(tableName)) {
                  // Get server hosting this region from catalog family. Return false if no server
                  // hosting this region, or if the server hosting this region was recently killed
                  // (for fault tolerance testing).
                  tableFound = true;
                  byte[] server =
                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                  if (server == null) {
                    return false;
                  } else {
                    byte[] startCode =
                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                    ServerName serverName =
                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
                        Bytes.toLong(startCode));
                    if (!getHBaseClusterInterface().isDistributedCluster() &&
                        getHBaseCluster().isKilledRS(serverName)) {
                      return false;
                    }
                  }
                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
                    return false;
                  }
                }
              }
            }
            if (!tableFound) {
              LOG.warn("Didn't find the entries for table " + tableName
                + " in meta, already deleted?");
            }
            return tableFound;
          }
        });
      }
    }
    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
    // check from the master state if we are using a mini cluster
    if (!getHBaseClusterInterface().isDistributedCluster()) {
      // So, all regions are in the meta table but make sure master knows of the assignments before
      // returning -- sometimes this can lag.
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
    LOG.info("All regions for table " + tableName + " assigned.");
  }
  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
      // readpoint 0.
      0);

    List<Cell> result = new ArrayList<>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      Cell kv = result.get(0);
      if (!CellUtil.matchingRows(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
  /**
   * Create region split keys between startkey and endKey
   * @param startKey
   * @param endKey
   * @param numRegions the number of regions to be created. It has to be greater than 3.
   * @return resulting split keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
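  /*
   * Worked example (illustrative comment only): for numRegions = 4, Bytes.split(startKey,
   * endKey, 1) yields three keys {startKey, mid, endKey}, so the returned array holds four
   * region start keys: {EMPTY_BYTE_ARRAY, startKey, mid, endKey}.
   *
   *   byte[][] startKeys = util.getRegionSplitStartKeys(
   *     Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 4); // startKeys.length == 4
   */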
  /**
   * Do a small get/scan against one store. This is required because store
   * has no actual methods of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store,
      byte[] row,
      NavigableSet<byte[]> columns
      ) throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getColumnFamilyDescriptor().getName(), columns);

    return getFromStoreFile(store, get);
  }
  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i;
    for (i = 0; i < minLen
        && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0;
        ++i) {
    }

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
        "Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " +
        safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }
  public static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
        HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
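  /*
   * Illustrative note (not part of the original class): the returned key is the standard
   * "quorum:clientPort:znodeParent" form used when pointing replication or a second client
   * at this cluster; a mini cluster typically yields something like "localhost:21818:/hbase"
   * (the client port varies per test run).
   */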
  /** Creates a random table with the given parameters */
  public Table createRandomTable(TableName tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
      " regions, " + numFlushes + " storefiles per region, " +
      numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final Table table = createTable(tableName, cfBytes,
      maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)),
      numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
          actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
            put.addColumn(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.addColumn(cf, qual, ts);
          } else {
            del.addColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          mutator.mutate(put);
        }

        if (!del.isEmpty()) {
          mutator.mutate(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      mutator.flush();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }
    mutator.close();

    return table;
  }
  public static int randomFreePort() {
    return HBaseCommonTestingUtility.randomFreePort();
  }

  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
  public static void waitForHostPort(String host, int port)
      throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
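  /*
   * Usage sketch (illustrative comment only; host and port are examples): block until an
   * external daemon spun up by a test is accepting TCP connections before pointing a
   * client at it:
   *
   *   HBaseTestingUtility.waitForHostPort("127.0.0.1", 21818);
   */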
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
      Durability.USE_DEFAULT);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setDurability(durability);
    tableDescriptor.setRegionReplication(regionReplication);
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamily);
    familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
    familyDescriptor.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, tableDescriptor, familyDescriptor,
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[][] columnFamilies, Algorithm compression,
      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
      Durability durability)
      throws IOException {
    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
      new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
    tableDescriptor.setDurability(durability);
    tableDescriptor.setRegionReplication(regionReplication);
    ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
    for (int i = 0; i < columnFamilies.length; i++) {
      ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
        new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamilies[i]);
      familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
      familyDescriptor.setCompressionType(compression);
      hcds[i] = familyDescriptor;
    }
    return createPreSplitLoadTestTable(conf, tableDescriptor, hcds, numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor hcd, int numRegionsPerServer)
      throws IOException {
    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] {hcd},
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
      int numRegionsPerServer) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcds,
      new RegionSplitter.HexStringSplit(), numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableDescriptor td, ColumnFamilyDescriptor[] cds,
      SplitAlgorithm splitter, int numRegionsPerServer) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
    for (ColumnFamilyDescriptor cd : cds) {
      if (!td.hasColumnFamily(cd.getName())) {
        builder.setColumnFamily(cd);
      }
    }
    td = builder.build();
    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();

    try {
      // Create a table with pre-split regions. The number of splits is set as:
      // (region servers * regions per region server).
      int numberOfServers = admin.getRegionServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info("Number of live regionservers: " + numberOfServers + ", " +
        "pre-splitting table into " + totalNumberOfRegions + " regions " +
        "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = splitter.split(totalNumberOfRegions);

      admin.createTable(td, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
    } finally {
      admin.close();
      unmanagedConnection.close();
    }
    return totalNumberOfRegions;
  }
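  /*
   * Usage sketch (illustrative comment only; table/family names hypothetical): create a
   * load-test table with the default number of regions per server and no compression or
   * encoding, via the simplest overload above:
   *
   *   int regions = HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(),
   *     TableName.valueOf("loadTable"), Bytes.toBytes("f"),
   *     Compression.Algorithm.NONE, DataBlockEncoding.NONE);
   */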
  public static int getMetaRSPort(Connection connection) throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
    }
  }
  /**
   * Due to async racing issue, a region may not be in
   * the online region list of a region server yet, after
   * the assignment znode is deleted and the new assignment
   * is recorded in master.
   */
  public void assertRegionOnServer(
      final RegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }
  /**
   * Check to make sure the region is open on the specified
   * region server, but not on any other one.
   */
  public void assertRegionOnlyOnServer(
      final RegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue; // hosting server, skip
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
              r.getRegionInfo().getRegionId() != hri.getRegionId());
          }
        }
        return; // good, we are happy
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
      BlockCache blockCache) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
  }

  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in master
   */
  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        final RegionStates regionStates = getMiniHBaseCluster().getMaster()
          .getAssignmentManager().getRegionStates();
        return "found in transition: " + regionStates.getRegionsInTransition().toString();
      }

      @Override
      public boolean evaluate() throws IOException {
        HMaster master = getMiniHBaseCluster().getMaster();
        if (master == null) return false;
        AssignmentManager am = master.getAssignmentManager();
        if (am == null) return false;
        return !am.hasRegionsInTransition();
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is enabled
   */
  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.ENABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is disabled
   */
  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.DISABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().isTableDisabled(tableName);
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is available
   */
  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableAvailability(tableName);
      }

      @Override
      public boolean evaluate() throws IOException {
        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
        if (tableAvailable) {
          try (Table table = getConnection().getTable(tableName)) {
            TableDescriptor htd = table.getDescriptor();
            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
                .getAllRegionLocations()) {
              Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
                .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
                .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
              for (byte[] family : htd.getColumnFamilyNames()) {
                scan.addFamily(family);
              }
              try (ResultScanner scanner = table.getScanner(scan)) {
                scanner.next();
              }
            }
          }
        }
        return tableAvailable;
      }
    };
  }
  /**
   * Wait until no regions in transition.
   * @param timeout How long to wait.
   * @throws IOException
   */
  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
    waitFor(timeout, predicateNoRegionsInTransition());
  }

  /**
   * Wait until no regions in transition. (time limit 15min)
   * @throws IOException
   */
  public void waitUntilNoRegionsInTransition() throws IOException {
    waitUntilNoRegionsInTransition(15 * 60000);
  }
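  /*
   * Usage sketch (illustrative comment only): the predicates above compose with waitFor, so a
   * test can block on cluster quiescence and table state in two lines ("tn" is a TableName):
   *
   *   util.waitUntilNoRegionsInTransition(60000);
   *   util.waitFor(60000, util.predicateTableEnabled(tn));
   */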
  /**
   * Wait until labels are ready in VisibilityLabelsCache.
   * @param timeoutMillis
   * @param labels
   */
  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {

      @Override
      public boolean evaluate() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return false;
          }
        }
        return true;
      }

      @Override
      public String explainFailure() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return label + " is not available yet";
          }
        }
        return "";
      }
    });
  }
  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with the combination of compression,
   * encoding, bloom codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
          columnFamilyDescriptorBuilder.setCompressionType(compressionType);
          columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
          columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
          columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
          familyId++;
        }
      }
    }
    return columnFamilyDescriptors;
  }
  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan().withStartRow(row);
    scan.setSmall(true);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
    }
  }
  private boolean isTargetTable(final byte[] inRow, Cell c) {
    String inputRowString = Bytes.toString(inRow);
    int i = inputRowString.indexOf(HConstants.DELIMITER);
    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    int o = outputRowString.indexOf(HConstants.DELIMITER);
    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
  }
  /**
   * Sets up {@link MiniKdc} for testing security.
   * Uses {@link HBaseKerberosUtils} to set the given keytab file as
   * {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}.
   * FYI, there is also the easier-to-use kerby KDC server and utility for using it,
   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
   * less baggage. It came in with HBASE-5291.
   */
  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
    Properties conf = MiniKdc.createConf();
    conf.put(MiniKdc.DEBUG, true);
    MiniKdc kdc = null;
    File dir = null;
    // There is time lag between selecting a port and trying to bind with it. It's possible that
    // another service captures the port in between which'll result in BindException.
    boolean bindException;
    int numTries = 0;
    do {
      try {
        bindException = false;
        dir = new File(getDataTestDir("kdc").toUri().getPath());
        kdc = new MiniKdc(conf, dir);
        kdc.start();
      } catch (BindException e) {
        FileUtils.deleteDirectory(dir); // clean directory
        numTries++;
        if (numTries == 3) {
          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
          throw e;
        }
        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
        bindException = true;
      }
    } while (bindException);
    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
    return kdc;
  }
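  /*
   * Usage sketch (illustrative comment only; principal and file names hypothetical): secure
   * tests start the KDC, create a principal for the keytab, and stop the KDC on teardown:
   *
   *   File keytab = new File(util.getDataTestDir("keytab").toUri().getPath());
   *   MiniKdc kdc = util.setupMiniKdc(keytab);
   *   kdc.createPrincipal(keytab, "hbase/localhost");
   *   // ... run the secured test ...
   *   kdc.stop();
   */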
  public int getNumHFiles(final TableName tableName, final byte[] family) {
    int numHFiles = 0;
    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName,
        family);
    }
    return numHFiles;
  }

  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
      final byte[] family) {
    int numHFiles = 0;
    for (Region region : rs.getRegions(tableName)) {
      numHFiles += region.getStore(family).getStorefilesCount();
    }
    return numHFiles;
  }
  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
    assertEquals(ltdFamilies.size(), rtdFamilies.size());
    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(), it2 =
        rtdFamilies.iterator(); it.hasNext();) {
      assertEquals(0,
        ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
    }
  }