/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.BindException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BooleanSupplier;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ClusterConnectionFactory;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Hbck;
import org.apache.hadoop.hbase.client.MasterRegistry;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVM;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

/**
 * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
 * functionality. Create an instance and keep it around testing HBase.
 * <p>
 * This class is meant to be your one-stop shop for anything you might need testing. Manages one
 * cluster at a time only. Managed cluster can be an in-process {@link SingleProcessHBaseCluster},
 * or a deployed cluster of type {@code DistributedHBaseCluster}. Not all methods work with the
 * real cluster.
 * <p>
 * Depends on log4j being on classpath and hbase-site.xml for logging and test-run configuration.
 * It does not set logging levels.
 * <p>
 * In the configuration properties, default values for master-info-port and region-server-port are
 * overridden such that a random port will be assigned (thus avoiding port contention if another
 * local HBase instance is already running).
 * <p>
 * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
 * setting it to true.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.PHOENIX)
@InterfaceStability.Evolving
public class HBaseTestingUtil extends HBaseZKTestingUtil {
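
  // A minimal usage sketch, hedged: the surrounding test class, its method names and the JUnit 4
  // lifecycle annotations below are illustrative assumptions, not part of this file. A test
  // typically keeps one static instance around, starts a mini cluster once, and shuts it down
  // when the test class finishes.
  //
  //   private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  //
  //   @BeforeClass
  //   public static void setUpBeforeClass() throws Exception {
  //     TEST_UTIL.startMiniCluster();
  //   }
  //
  //   @AfterClass
  //   public static void tearDownAfterClass() throws Exception {
  //     TEST_UTIL.shutdownMiniCluster();
  //   }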

  /**
   * System property key to get test directory value. Name is as it is because mini dfs has
   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a
   * property used in mini dfs.
   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
   */
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
  /**
   * The default number of regions per regionserver when creating a pre-split table.
   */
  public static final int DEFAULT_REGIONS_PER_SERVER = 3;

  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
  public static final boolean PRESPLIT_TEST_TABLE = true;

  private MiniDFSCluster dfsCluster = null;

  private volatile HBaseClusterInterface hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  /** If there is a mini cluster running for this testing utility instance. */
  private volatile boolean miniClusterRunning;

  private String hadoopLogDir;

  /**
   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
   */
  private Path dataTestDirOnTestFS = null;

  private final AtomicReference<AsyncClusterConnection> asyncConnection = new AtomicReference<>();

  /** Filesystem URI used for map-reduce mini-cluster setup */
  private static String FS_URI;

  /** This is for unit tests parameterized with a single boolean. */
  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
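
  // Hedged illustration of how a parameterized test might consume this list; the test class and
  // JUnit wiring below are assumptions, not part of this file.
  //
  //   @RunWith(Parameterized.class)
  //   public class TestSomething {
  //     @Parameterized.Parameters
  //     public static List<Object[]> params() {
  //       return HBaseTestingUtil.MEMSTORETS_TAGS_PARAMETRIZED;
  //     }
  //   }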

  /**
   * Checks to see if a specific port is available.
   * @param port the port number to check for availability
   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
   */
  public static boolean available(int port) {
    ServerSocket ss = null;
    DatagramSocket ds = null;
    try {
      ss = new ServerSocket(port);
      ss.setReuseAddress(true);
      ds = new DatagramSocket(port);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      // Do nothing
    } finally {
      if (ds != null) {
        ds.close();
      }
      if (ss != null) {
        try {
          ss.close();
        } catch (IOException e) {
          /* should not be thrown */
        }
      }
    }
    return false;
  }
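
  // Hedged usage sketch: probing for a free port before binding a test server. The candidate
  // range and loop are illustrative assumptions, not part of this utility.
  //
  //   int port = 0;
  //   for (int candidate = 50000; candidate < 50100; candidate++) {
  //     if (HBaseTestingUtil.available(candidate)) {
  //       port = candidate;
  //       break;
  //     }
  //   }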

  /**
   * Create all combinations of Bloom filters and compression algorithms for testing.
   */
  private static List<Object[]> bloomAndCompressionCombinations() {
    List<Object[]> configurations = new ArrayList<>();
    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) {
      for (BloomType bloomType : BloomType.values()) {
        configurations.add(new Object[] { comprAlgo, bloomType });
      }
    }
    return Collections.unmodifiableList(configurations);
  }

  /**
   * Create combination of memstoreTS and tags
   */
  private static List<Object[]> memStoreTSAndTagsCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false });
    configurations.add(new Object[] { false, true });
    configurations.add(new Object[] { true, false });
    configurations.add(new Object[] { true, true });
    return Collections.unmodifiableList(configurations);
  }

  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
    List<Object[]> configurations = new ArrayList<>();
    configurations.add(new Object[] { false, false, true });
    configurations.add(new Object[] { false, false, false });
    configurations.add(new Object[] { false, true, true });
    configurations.add(new Object[] { false, true, false });
    configurations.add(new Object[] { true, false, true });
    configurations.add(new Object[] { true, false, false });
    configurations.add(new Object[] { true, true, true });
    configurations.add(new Object[] { true, true, false });
    return Collections.unmodifiableList(configurations);
  }

  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();
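
  // Hedged illustration: these combination lists are intended to feed JUnit's Parameterized
  // runner, so the same test runs once per (compression, bloom) pair. The surrounding test class
  // is an assumption, not part of this file.
  //
  //   @Parameterized.Parameters
  //   public static Collection<Object[]> data() {
  //     return HBaseTestingUtil.BLOOM_AND_COMPRESSION_COMBINATIONS;
  //   }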

  /**
   * <p>
   * Create an HBaseTestingUtility using a default configuration.
   * <p>
   * Initially, all tmp files are written to a local test data directory. Once
   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()},
   * tmp data will be written to the DFS directory instead.
   */
  public HBaseTestingUtil() {
    this(HBaseConfiguration.create());
  }

  /**
   * <p>
   * Create an HBaseTestingUtility using a given configuration.
   * <p>
   * Initially, all tmp files are written to a local test data directory. Once
   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()},
   * tmp data will be written to the DFS directory instead.
   * @param conf The configuration to use for further operations
   */
  public HBaseTestingUtil(@Nullable Configuration conf) {
    super(conf);

    // a hbase checksum verification failure will cause unit tests to fail
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);

    // Save this for when setting default file:// breaks things
    if (this.conf.get("fs.defaultFS") != null) {
      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
    }
    if (this.conf.get(HConstants.HBASE_DIR) != null) {
      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
    }
    // Every cluster is a local cluster until we start DFS
    // Note that conf could be null, but this.conf will not be
    String dataTestDir = getDataTestDir().toString();
    this.conf.set("fs.defaultFS", "file:///");
    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
    // If the value for random ports isn't set, set it to true, thus making
    // tests opt-out for random port assignment
    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
  }

  /**
   * Close both the region {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final Region r) throws IOException {
    closeRegionAndWAL((HRegion) r);
  }

  /**
   * Close both the HRegion {@code r} and its underlying WAL. For use in tests.
   */
  public static void closeRegionAndWAL(final HRegion r) throws IOException {
    if (r == null) return;
    r.close();
    if (r.getWAL() == null) return;
    r.getWAL().close();
  }

  /**
   * Returns this class's instance of {@link Configuration}. Be careful how you use the returned
   * Configuration since {@link Connection} instances can be shared. The Map of Connections is
   * keyed by the Configuration. If say, a Connection was being used against a cluster that had
   * been shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be
   * wholesome. Rather than using the return directly, it's usually best to make a copy and use
   * that. Do <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
   * @return Instance of Configuration.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  public void setHBaseCluster(HBaseClusterInterface hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }

  /**
   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so
   * we can have many concurrent tests running if we need to. It needs to amend the
   * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases its data dir
   * on. Modifying a System property is not the way to do concurrent instances -- another instance
   * could grab the temporary value unintentionally -- but there's nothing we can do about it at
   * the moment; single instance only is how the minidfscluster works. We also create the
   * underlying directory names for hadoop.log.dir, mapreduce.cluster.local.dir and
   * hadoop.tmp.dir, and set the values in the conf, and as a system property for hadoop.tmp.dir
   * (We do not create them!).
   * @return The calculated data test build directory, if newly-created.
   */
  @Override
  protected Path setupDataTestDir() {
    Path testPath = super.setupDataTestDir();
    if (null == testPath) {
      return null;
    }

    createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");

    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
    // we want our own value to ensure uniqueness on the same machine
    createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");

    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
    createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
    return testPath;
  }

  private void createSubDirAndSystemProperty(String propertyName, Path parent,
    String subDirName) {
    String sysValue = System.getProperty(propertyName);

    if (sysValue != null) {
      // There is already a value set. So we do nothing but hope
      // that there will be no conflicts
      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue +
        " so I do NOT create it in " + parent);
      String confValue = conf.get(propertyName);
      if (confValue != null && !confValue.endsWith(sysValue)) {
        LOG.warn(propertyName + " property value differs in configuration and system: " +
          "Configuration=" + confValue + " while System=" + sysValue +
          " Erasing configuration value by system value.");
      }
      conf.set(propertyName, sysValue);
    } else {
      // Ok, it's not set, so we create it as a subdirectory
      createSubDir(propertyName, parent, subDirName);
      System.setProperty(propertyName, conf.get(propertyName));
    }
  }

  /**
   * @return Where to write test data on the test filesystem; Returns working directory for the
   *         test filesystem by default
   * @see #setupDataTestDirOnTestFS()
   * @see #getTestFileSystem()
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
   * temporary test data. Call this method after setting up the mini dfs cluster if the test
   * relies on it.
   * @return a unique path in the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }
    return dataTestDirOnTestFS;
  }

  /**
   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
   * temporary test data. Call this method after setting up the mini dfs cluster if the test
   * relies on it.
   * @return a unique path in the test filesystem
   * @param subdirName name of the subdir to create under the base test dir
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }

  /**
   * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
   * setup.
   */
  private void setupDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS != null) {
      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
      return;
    }
    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
  }

  /**
   * Sets up a new path in test filesystem to be used by tests.
   */
  private Path getNewDataTestDirOnTestFS() throws IOException {
    // The file system can be either local, mini dfs, or if the configuration
    // is supplied externally, it can be an external cluster FS. If it is a local
    // file system, the tests should use getBaseTestDir, otherwise, we can use
    // the working directory, and create a unique sub dir there
    FileSystem fs = getTestFileSystem();
    Path newDataTestDir;
    String randomStr = getRandomUUID().toString();
    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
      newDataTestDir = new Path(getDataTestDir(), randomStr);
      File dataTestDir = new File(newDataTestDir.toString());
      if (deleteOnExit()) dataTestDir.deleteOnExit();
    } else {
      Path base = getBaseTestDirOnTestFS();
      newDataTestDir = new Path(base, randomStr);
      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
    }
    return newDataTestDir;
  }

  /**
   * Cleans the test data directory on the test filesystem.
   * @return True if we removed the test dirs
   */
  public boolean cleanupDataTestDirOnTestFS() throws IOException {
    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
    if (ret) {
      dataTestDirOnTestFS = null;
    }
    return ret;
  }

  /**
   * Cleans a subdirectory under the test data directory on the test filesystem.
   * @return True if we removed child
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }

  /**
   * Start a minidfscluster.
   * @param servers How many DNs to start.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }

  /**
   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for
   * things like HDFS block location verification. If you start MiniDFSCluster without host names,
   * all instances of the datanodes will have the same host name.
   * @param hosts hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(final String[] hosts) throws Exception {
    if (hosts != null && hosts.length != 0) {
      return startMiniDFSCluster(hosts.length, hosts);
    } else {
      return startMiniDFSCluster(1, null);
    }
  }

  /**
   * Start a minidfscluster. Can only create one.
   * @param servers How many DNs to start.
   * @param hosts   hostnames DNs to run on.
   * @see #shutdownMiniDFSCluster()
   * @return The mini dfs cluster created.
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts) throws Exception {
    return startMiniDFSCluster(servers, null, hosts);
  }
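
  // Hedged usage sketch: starting datanodes on distinct host names so block-location logic can
  // be exercised. The host names below are illustrative assumptions.
  //
  //   TEST_UTIL.startMiniDFSCluster(new String[] { "host1", "host2", "host3" });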

  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }

  public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
    throws Exception {
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }

  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
      null, null, null);
    return dfsCluster;
  }

  /**
   * This is used before starting HDFS and map-reduce mini-clusters. Run something like the below
   * to check for the likes of '/tmp' references -- i.e. references outside of the test data dir
   * -- in the conf.
   *
   * <pre>
   * Configuration conf = TEST_UTIL.getConfiguration();
   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
   *   Map.Entry&lt;String, String&gt; e = i.next();
   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
   * }
   * </pre>
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("test.cache.data");
    createDirAndSetProperty("hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
    createDirAndSetProperty("mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
      new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
    conf.set("yarn.app.mapreduce.am.staging-dir",
      new Path(root, "mapreduce-am-staging-root-dir").toString());

    // Frustrate yarn's and hdfs's attempts at writing /tmp.
    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
    createDirAndSetProperty("yarn.nodemanager.log-dirs");
    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
    createDirAndSetProperty("dfs.journalnode.edits.dir");
    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
    createDirAndSetProperty("nfs.dump.dir");
    createDirAndSetProperty("java.io.tmpdir");
    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
  }

  /**
   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
   * Default to false.
   */
  public boolean isNewVersionBehaviorEnabled() {
    final String propName = "hbase.tests.new.version.behavior";
    String v = System.getProperty(propName);
    if (v != null) {
      return Boolean.parseBoolean(v);
    }
    return false;
  }
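
  // Hedged usage note: this toggle is read from a system property, so a run might enable it with
  // something like the following illustrative Maven invocation (not defined in this file):
  //
  //   mvn test -Dhbase.tests.new.version.behavior=true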

  /**
   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property.
   * This allows specifying this parameter on the command line. If not set, default is false.
   */
  public boolean isReadShortCircuitOn() {
    final String propName = "hbase.tests.use.shortcircuit.reads";
    String readOnProp = System.getProperty(propName);
    if (readOnProp != null) {
      return Boolean.parseBoolean(readOnProp);
    } else {
      return conf.getBoolean(propName, false);
    }
  }

  /**
   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS
   * settings, including skipping the hdfs checksum checks.
   */
  private void enableShortCircuit() {
    if (isReadShortCircuitOn()) {
      String curUser = System.getProperty("user.name");
      LOG.info("read short circuit is ON for user " + curUser);
      // read short circuit, for hdfs
      conf.set("dfs.block.local-path-access.user", curUser);
      // read short circuit, for hbase
      conf.setBoolean("dfs.client.read.shortcircuit", true);
      // Skip checking checksum, for the hdfs client and the datanode
      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
    } else {
      LOG.info("read short circuit is OFF");
    }
  }

  private String createDirAndSetProperty(final String property) {
    return createDirAndSetProperty(property, property);
  }

  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }

  /**
   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      // The below throws an exception per dn, AsynchronousCloseException.
      this.dfsCluster.shutdown();
      dfsCluster = null;
      dataTestDirOnTestFS = null;
      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }

  /**
   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
   * other options will use default values, defined in {@link StartTestingClusterOption.Builder}.
   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
   * @see #startMiniCluster(StartTestingClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public SingleProcessHBaseCluster startMiniCluster(int numSlaves) throws Exception {
    StartTestingClusterOption option = StartTestingClusterOption.builder()
      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
    return startMiniCluster(option);
  }

  /**
   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
   * value can be found in {@link StartTestingClusterOption.Builder}.
   * @see #startMiniCluster(StartTestingClusterOption option)
   * @see #shutdownMiniDFSCluster()
   */
  public SingleProcessHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(StartTestingClusterOption.builder().build());
  }

  /**
   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
   * under System property test.build.data, to be cleaned up on exit.
   * @see #shutdownMiniDFSCluster()
   */
  public SingleProcessHBaseCluster startMiniCluster(StartTestingClusterOption option)
    throws Exception {
    LOG.info("Starting up minicluster with option: {}", option);

    // If we already put up a cluster, fail.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
    if (dfsCluster == null) {
      LOG.info("STARTING DFS");
      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
    } else {
      LOG.info("NOT STARTING DFS");
    }

    // Start up a zk cluster.
    if (getZkCluster() == null) {
      startMiniZKCluster(option.getNumZkServers());
    }

    // Start the MiniHBaseCluster
    return startMiniHBaseCluster(option);
  }
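
  // Hedged illustration of startup with non-default options; the counts below are arbitrary
  // examples, not defaults of this class.
  //
  //   StartTestingClusterOption option = StartTestingClusterOption.builder()
  //     .numMasters(2).numRegionServers(3).numDataNodes(3).build();
  //   SingleProcessHBaseCluster cluster = TEST_UTIL.startMiniCluster(option);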

  /**
   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
   * {@link #startMiniCluster()}. This is useful when doing stepped startup of clusters.
   * @return Reference to the hbase mini hbase cluster.
   * @see #startMiniCluster(StartTestingClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption option)
    throws IOException, InterruptedException {
    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
    createRootDir(option.isCreateRootDir());
    if (option.isCreateWALDir()) {
      createWALRootDir();
    }
    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
    // for tests that do not read hbase-defaults.xml
    setHBaseFsTmpDir();

    // These settings will make the server wait until this exact number of
    // region servers are connected.
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
    }

    // Avoid log flooded with chore execution time, see HBASE-24646 for more details.
    Log4jUtils.setLogLevel(org.apache.hadoop.hbase.ScheduledChore.class.getName(), "INFO");

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new SingleProcessHBaseCluster(c, option.getNumMasters(),
      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
      option.getMasterClass(), option.getRsClass());
    // Populate the master address configuration from mini cluster configuration.
    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
    // Don't leave here till we've done a successful scan of the hbase:meta
    try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
      ResultScanner s = t.getScanner(new Scan())) {
      for (;;) {
        if (s.next() == null) {
          break;
        }
      }
    }

    getAdmin(); // create immediately the hbaseAdmin
    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());

    return (SingleProcessHBaseCluster) hbaseCluster;
  }

  /**
   * Starts up mini hbase cluster using default options. Default options can be found in
   * {@link StartTestingClusterOption.Builder}.
   * @see #startMiniHBaseCluster(StartTestingClusterOption)
   * @see #shutdownMiniHBaseCluster()
   */
  public SingleProcessHBaseCluster startMiniHBaseCluster()
    throws IOException, InterruptedException {
    return startMiniHBaseCluster(StartTestingClusterOption.builder().build());
  }

  /**
   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
   * {@link #startMiniCluster()}. All other options will use default values, defined in
   * {@link StartTestingClusterOption.Builder}.
   * @param numMasters       Master node number.
   * @param numRegionServers Number of region servers.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *             {@link #startMiniHBaseCluster(StartTestingClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartTestingClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
    throws IOException, InterruptedException {
    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
      .numRegionServers(numRegionServers).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
   * {@link #startMiniCluster()}. All other options will use default values, defined in
   * {@link StartTestingClusterOption.Builder}.
   * @param numMasters       Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts          Ports that RegionServer should use.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *             {@link #startMiniHBaseCluster(StartTestingClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartTestingClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
    List<Integer> rsPorts) throws IOException, InterruptedException {
    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
      .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
   * {@link #startMiniCluster()}. All other options will use default values, defined in
   * {@link StartTestingClusterOption.Builder}.
   * @param numMasters       Master node number.
   * @param numRegionServers Number of region servers.
   * @param rsPorts          Ports that RegionServer should use.
   * @param masterClass      The class to use as HMaster, or null for default.
   * @param rsClass          The class to use as HRegionServer, or null for default.
   * @param createRootDir    Whether to create a new root or data directory path.
   * @param createWALDir     Whether to create a new WAL directory.
   * @return The mini HBase cluster created.
   * @see #shutdownMiniHBaseCluster()
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
   *             {@link #startMiniHBaseCluster(StartTestingClusterOption)} instead.
   * @see #startMiniHBaseCluster(StartTestingClusterOption)
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
   */
  @Deprecated
  public SingleProcessHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
    Class<? extends SingleProcessHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
    boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
    StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(numMasters)
      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
      .rsPorts(rsPorts).createRootDir(createRootDir).createWALDir(createWALDir).build();
    return startMiniHBaseCluster(option);
  }

  /**
   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if
   * you want to keep dfs/zk up and just stop/start hbase.
   * @param servers number of region servers
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.restartHBaseCluster(servers, null);
  }

  public void restartHBaseCluster(int servers, List<Integer> ports)
    throws IOException, InterruptedException {
    StartTestingClusterOption option =
      StartTestingClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
    restartHBaseCluster(option);
    invalidateConnection();
  }

  public void restartHBaseCluster(StartTestingClusterOption option)
    throws IOException, InterruptedException {
    closeConnection();
    this.hbaseCluster = new SingleProcessHBaseCluster(this.conf, option.getNumMasters(),
      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
      option.getMasterClass(), option.getRsClass());
    // Don't leave here till we've done a successful scan of the hbase:meta
    Connection conn = ConnectionFactory.createConnection(this.conf);
    Table t = conn.getTable(TableName.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
    s.close();
    t.close();
    conn.close();
  }

  /**
   * Returns current mini hbase cluster. Only has something in it after a call to
   * {@link #startMiniCluster()}.
   * @see #startMiniCluster()
   */
  public SingleProcessHBaseCluster getMiniHBaseCluster() {
    if (this.hbaseCluster == null || this.hbaseCluster instanceof SingleProcessHBaseCluster) {
      return (SingleProcessHBaseCluster) this.hbaseCluster;
    }
    throw new RuntimeException(
      hbaseCluster + " not an instance of " + SingleProcessHBaseCluster.class.getName());
  }

  /**
   * Stops mini hbase, zk, and hdfs clusters.
   * @see #startMiniCluster(int)
   */
  public void shutdownMiniCluster() throws IOException {
    LOG.info("Shutting down minicluster");
    shutdownMiniHBaseCluster();
    shutdownMiniDFSCluster();
    shutdownMiniZKCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }

  /**
   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException in case command is unsuccessful
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shutdown zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  /**
   * Abruptly shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
   * @throws java.io.IOException throws in case command is unsuccessful
   */
  public void killMiniHBaseCluster() throws IOException {
    cleanup();
    if (this.hbaseCluster != null) {
      getMiniHBaseCluster().killAll();
      this.hbaseCluster = null;
    }
    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }

  // close hbase admin, close current connection and reset MIN MAX configs for RS.
  private void cleanup() throws IOException {
    closeConnection();
    // unset the configuration for MIN and MAX RS to start
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  }

  /**
   * Returns the path to the default root dir the minicluster uses. If <code>create</code> is
   * true, a new root directory path is fetched irrespective of whether it has been fetched
   * before or not. If false, previous path is used. Note: this does not cause the root dir to be
   * created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath(boolean create) throws IOException {
    if (!create) {
      return getDataTestDirOnTestFS();
    } else {
      return getNewDataTestDirOnTestFS();
    }
  }

  /**
   * Same as {@link HBaseTestingUtil#getDefaultRootDirPath(boolean create)} except that
   * <code>create</code> flag is false. Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }

  /**
   * Creates an hbase rootdir in user home directory. Also creates hbase version file. Normally
   * you won't make use of this method. Root hbasedir is created for you as part of mini cluster
   * startup. You'd only use this method if you were doing manual operation.
   * @param create This flag decides whether to get a new root or data directory path or not, if
   *               it has been fetched already. Note: Directory will be made irrespective of
   *               whether path has been fetched or not. If directory already exists, it will be
   *               overwritten
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }

  /**
   * Same as {@link HBaseTestingUtil#createRootDir(boolean create)} except that
   * <code>create</code> flag is false.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }

  /**
   * Creates a hbase walDir in the user's home directory. Normally you won't make use of this
   * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
   * this method if you were doing manual operation.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createWALRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path walDir = getNewDataTestDirOnTestFS();
    CommonFSUtils.setWALRootDir(this.conf, walDir);
    fs.mkdirs(walDir);
    return walDir;
  }

  private void setHBaseFsTmpDir() throws IOException {
    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
    if (hbaseFsTmpDirInString == null) {
      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
    } else {
      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
    }
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /**
   * Flushes all caches in the mini hbase cluster
   * @throws IOException
   */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /**
   * Compact all regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /**
   * Compact all of a table's regions in the mini hbase cluster
   * @throws IOException
   */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }

  /**
   * Create a table.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String family) throws IOException {
    return createTable(tableName, new String[] { family });
  }

  /**
   * Create a table.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, String[] families) throws IOException {
    List<byte[]> fams = new ArrayList<>(families.length);
    for (String family : families) {
      fams.add(Bytes.toBytes(family));
    }
    return createTable(tableName, fams.toArray(new byte[0][]));
  }

  /**
   * Create a table.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[] family) throws IOException {
    return createTable(tableName, new byte[][] { family });
  }

  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
    throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);

    return createTable(tableName, new byte[][] { family }, splitKeys);
  }

  /**
   * Create a table.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families) throws IOException {
    return createTable(tableName, families, (byte[][]) null);
  }

  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
  }

  /**
   * Create a table with multiple regions.
   * @param replicaCount replica count.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
    throws IOException {
    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount);
  }

  /**
   * Create a table.
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
    throws IOException {
    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
  }
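
  // Hedged usage sketch of creating a pre-split table; the table name, family and split point
  // below are illustrative assumptions.
  //
  //   byte[][] splitKeys = new byte[][] { Bytes.toBytes("m") };
  //   Table table = TEST_UTIL.createTable(TableName.valueOf("testtable"),
  //     new byte[][] { Bytes.toBytes("f") }, splitKeys);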

  /**
   * Create a table.
   * @param tableName    the table name
   * @param families     the families
   * @param splitKeys    the splitkeys
   * @param replicaCount the region replica count
   * @return A Table instance for the created table.
   * @throws IOException throws IOException
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
    int replicaCount) throws IOException {
    return createTable(tableName, families, splitKeys, replicaCount,
      new Configuration(getConfiguration()));
  }

  public Table createTable(TableName tableName, byte[][] families, int numVersions,
    byte[] startKey, byte[] endKey, int numRegions) throws IOException {
    TableDescriptor desc = createTableDescriptor(tableName, families, numVersions);

    getAdmin().createTable(desc, startKey, endKey, numRegions);
    // HBaseAdmin only waits for regions to appear in hbase:meta we
    // should wait until they are assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }

  /**
   * Create a table.
   * @param c Configuration to use
   * @return A Table instance for the created table.
   */
  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
    throws IOException {
    return createTable(htd, families, null, c);
  }

  /**
   * Create a table.
   * @param htd       table descriptor
   * @param families  array of column families
   * @param splitKeys array of split keys
   * @param c         Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
    Configuration c) throws IOException {
    // Disable blooms (they are on by default as of 0.95) but we disable them here because
    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
    // on is interfering.
    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
  }

  /**
   * Create a table.
   * @param htd       table descriptor
   * @param families  array of column families
   * @param splitKeys array of split keys
   * @param type      Bloom type
   * @param blockSize block size
   * @param c         Configuration to use
   * @return A Table instance for the created table.
   * @throws IOException if getAdmin or createTable fails
   */
  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
    BloomType type, int blockSize, Configuration c) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setBloomFilterType(type).setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        cfdb.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfdb.build());
    }
    TableDescriptor td = builder.build();
    if (splitKeys != null) {
      getAdmin().createTable(td, splitKeys);
    } else {
      getAdmin().createTable(td);
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(td.getTableName());
    return getConnection().getTable(td.getTableName());
  }

  /**
   * Create a table.
   * @param htd       table descriptor
   * @param splitRows array of split keys
   * @return A Table instance for the created table.
   * @throws IOException
   */
  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    if (isNewVersionBehaviorEnabled()) {
      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
        builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
      }
    }
    if (splitRows != null) {
      getAdmin().createTable(builder.build(), splitRows);
    } else {
      getAdmin().createTable(builder.build());
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return getConnection().getTable(htd.getTableName());
  }

  /**
   * Create a table.
   * @param tableName    the table name
   * @param families     the families
   * @param splitKeys    the split keys
   * @param replicaCount the replica count
   * @param c            Configuration to use
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
    int replicaCount, final Configuration c) throws IOException {
    TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(replicaCount).build();
    return createTable(htd, families, splitKeys, c);
  }
1384 * @return A Table instance for the created table.
1386 public Table
createTable(TableName tableName
, byte[] family
, int numVersions
) throws IOException
{
1387 return createTable(tableName
, new byte[][] { family
}, numVersions
);
1392 * @return A Table instance for the created table.
1394 public Table
createTable(TableName tableName
, byte[][] families
, int numVersions
)
1395 throws IOException
{
1396 return createTable(tableName
, families
, numVersions
, (byte[][]) null);
1401 * @return A Table instance for the created table.
1403 public Table
createTable(TableName tableName
, byte[][] families
, int numVersions
,
1404 byte[][] splitKeys
) throws IOException
{
1405 TableDescriptorBuilder builder
= TableDescriptorBuilder
.newBuilder(tableName
);
1406 for (byte[] family
: families
) {
1407 ColumnFamilyDescriptorBuilder cfBuilder
=
1408 ColumnFamilyDescriptorBuilder
.newBuilder(family
).setMaxVersions(numVersions
);
1409 if (isNewVersionBehaviorEnabled()) {
1410 cfBuilder
.setNewVersionBehavior(true);
1412 builder
.setColumnFamily(cfBuilder
.build());
1414 if (splitKeys
!= null) {
1415 getAdmin().createTable(builder
.build(), splitKeys
);
1417 getAdmin().createTable(builder
.build());
1419 // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1421 waitUntilAllRegionsAssigned(tableName
);
1422 return getConnection().getTable(tableName
);
1426 * Create a table with multiple regions.
1427 * @return A Table instance for the created table.
1429 public Table
createMultiRegionTable(TableName tableName
, byte[][] families
, int numVersions
)
1430 throws IOException
{
1431 return createTable(tableName
, families
, numVersions
, KEYS_FOR_HBA_CREATE_TABLE
);
  /**
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
    throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setMaxVersions(numVersions).setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
    }
    getAdmin().createTable(builder.build());
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
    String cpName) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setMaxVersions(numVersions).setBlocksize(blockSize);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
    }
    if (cpName != null) {
      builder.setCoprocessor(cpName);
    }
    getAdmin().createTable(builder.build());
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, byte[][] families, int[] numVersions)
    throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(numVersions[i]);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
      i++;
    }
    getAdmin().createTable(builder.build());
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * @return A Table instance for the created table.
   */
  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
    throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
    if (isNewVersionBehaviorEnabled()) {
      cfBuilder.setNewVersionBehavior(true);
    }
    builder.setColumnFamily(cfBuilder.build());
    getAdmin().createTable(builder.build(), splitRows);
    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
    // assigned
    waitUntilAllRegionsAssigned(tableName);
    return getConnection().getTable(tableName);
  }
  /**
   * Create a table with multiple regions.
   * @return A Table instance for the created table.
   */
  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
  }
  /**
   * Set the number of Region replicas.
   */
  public static void setReplicas(Admin admin, TableName table, int replicaCount)
    throws IOException, InterruptedException {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
      .setRegionReplication(replicaCount).build();
    admin.modifyTable(desc);
  }
  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTable(TableName tableName) throws IOException {
    try {
      getAdmin().disableTable(tableName);
    } catch (TableNotEnabledException e) {
      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
    }
    getAdmin().deleteTable(tableName);
  }
  /**
   * Drop an existing table
   * @param tableName existing table
   */
  public void deleteTableIfAny(TableName tableName) throws IOException {
    try {
      deleteTable(tableName);
    } catch (TableNotFoundException e) {
      // ignore
    }
  }
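
  // Typical teardown sketch (hedged example; the surrounding test class and "util" are
  // hypothetical): deleteTableIfAny is handy in @After methods because it tolerates tables
  // that a failed test never managed to create.
  //
  //   @After
  //   public void cleanup() throws IOException {
  //     util.deleteTableIfAny(TableName.valueOf("testMultiRegion"));
  //   }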
  // ==========================================================================
  // Canned table and table descriptor creation
  // ==========================================================================

  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
  private static final int MAXVERSIONS = 3;

  public static final char FIRST_CHAR = 'a';
  public static final char LAST_CHAR = 'z';
  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
  public TableDescriptorBuilder createModifyableTableDescriptor(final String name) {
    return createModifyableTableDescriptor(TableName.valueOf(name),
      ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS, MAXVERSIONS, HConstants.FOREVER,
      ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED);
  }
  public TableDescriptor createTableDescriptor(final TableName name, final int minVersions,
    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
        .setBlockCacheEnabled(false).setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
    }
    return builder.build();
  }
  public TableDescriptorBuilder createModifyableTableDescriptor(final TableName name,
    final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name);
    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(cfName)
        .setMinVersions(minVersions).setMaxVersions(versions).setKeepDeletedCells(keepDeleted)
        .setBlockCacheEnabled(false).setTimeToLive(ttl);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
    }
    return builder;
  }
  /**
   * Create a table of name <code>name</code>.
   * @param name Name to give table.
   * @return Table descriptor.
   */
  public TableDescriptor createTableDescriptor(final TableName name) {
    return createTableDescriptor(name, ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS,
      MAXVERSIONS, HConstants.FOREVER, ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED);
  }
  public TableDescriptor createTableDescriptor(final TableName tableName, byte[] family) {
    return createTableDescriptor(tableName, new byte[][] { family }, 1);
  }
  public TableDescriptor createTableDescriptor(final TableName tableName, byte[][] families,
    int maxVersions) {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(maxVersions);
      if (isNewVersionBehaviorEnabled()) {
        cfBuilder.setNewVersionBehavior(true);
      }
      builder.setColumnFamily(cfBuilder.build());
    }
    return builder.build();
  }
  /**
   * Create an HRegion that writes to the local tmp dirs
   * @param desc     a table descriptor indicating which table the region belongs to
   * @param startKey the start boundary of the region
   * @param endKey   the end boundary of the region
   * @return a region that writes to local dir for testing
   */
  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
    throws IOException {
    RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName()).setStartKey(startKey)
      .setEndKey(endKey).build();
    return createLocalHRegion(hri, desc);
  }
  /**
   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} when you're finished with it.
   */
  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
  }
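
  // Usage sketch (hedged; "util" and the table name are hypothetical test code): a local region
  // lets a test exercise HRegion directly without a cluster. Pair every createLocalHRegion call
  // with closeRegionAndWAL, otherwise the test leaks the region's WAL.
  //
  //   TableDescriptor desc = util.createTableDescriptor(TableName.valueOf("testLocalRegion"));
  //   HRegion region = util.createLocalHRegion(desc, null, null);
  //   try {
  //     region.put(new Put(Bytes.toBytes("row")).addColumn(fam1, null, Bytes.toBytes("v")));
  //   } finally {
  //     HBaseTestingUtil.closeRegionAndWAL(region);
  //   }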
  /**
   * Create an HRegion that writes to the local tmp dirs with specified wal
   * @param info regioninfo
   * @param conf configuration
   * @param desc table descriptor
   * @param wal  wal for this region.
   * @return created hregion
   */
  public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
    WAL wal) throws IOException {
    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
  }
  /**
   * @return A region on which you must call {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)}
   *         when done.
   */
  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
    Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
    throws IOException {
    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
      durability, wal, null, families);
  }
  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
    byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
    boolean[] compactedMemStore, byte[]... families) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family);
      if (compactedMemStore != null && i < compactedMemStore.length) {
        cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        cfBuilder.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
      }
      i++;
      // Keep all versions so tests can see every cell written.
      cfBuilder.setMaxVersions(Integer.MAX_VALUE);
      builder.setColumnFamily(cfBuilder.build());
    }
    builder.setDurability(durability);
    RegionInfo info =
      RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(stopKey).build();
    return createLocalHRegion(info, conf, builder.build(), wal);
  }
  // ==========================================================================

  /**
   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
   * read.
   * @param tableName existing table
   * @return HTable to that new table
   */
  public Table deleteTableData(TableName tableName) throws IOException {
    Table table = getConnection().getTable(tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    for (Result res : resScan) {
      Delete del = new Delete(res.getRow());
      table.delete(del);
    }
    resScan = table.getScanner(scan);
    resScan.close();
    return table;
  }
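
  // Usage sketch (hedged; "util" and "table" are hypothetical test code): unlike truncateTable
  // below, this keeps the table and its regions and only removes the rows, so split points
  // survive.
  //
  //   util.loadNumericRows(table, fam1, 0, 100);
  //   util.deleteTableData(table.getName());
  //   assertEquals(0, HBaseTestingUtil.countRows(table));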
  /**
   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
   * table.
   * @param tableName       table which must exist.
   * @param preserveRegions keep the existing split points
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
    throws IOException {
    Admin admin = getAdmin();
    if (!admin.isTableDisabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.truncateTable(tableName, preserveRegions);
    return getConnection().getTable(tableName);
  }
  /**
   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
   * preserve regions of existing table.
   * @param tableName table which must exist.
   * @return HTable for the new table
   */
  public Table truncateTable(final TableName tableName) throws IOException {
    return truncateTable(tableName, false);
  }
  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[] f) throws IOException {
    return loadTable(t, new byte[][] { f });
  }
  /**
   * Load table with rows from 'aaa' to 'zzz'.
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
    return loadTable(t, new byte[][] { f }, null, writeToWAL);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f Array of Families to load
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f) throws IOException {
    return loadTable(t, f, null);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f     Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
    return loadTable(t, f, value, true);
  }
  /**
   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
   * @param f     Array of Families to load
   * @param value the values of the cells. If null is passed, the row key is used as value
   * @return Count of rows loaded.
   */
  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
    throws IOException {
    List<Put> puts = new ArrayList<>();
    for (byte[] row : HBaseTestingUtil.ROWS) {
      Put put = new Put(row);
      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
      for (int i = 0; i < f.length; i++) {
        byte[] value1 = value != null ? value : row;
        put.addColumn(f[i], f[i], value1);
      }
      puts.add(put);
    }
    t.put(puts);
    return puts.size();
  }
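
  // Usage sketch (hedged; "util" and the table are hypothetical test code): loadTable writes
  // one row per element of ROWS ('aaa'..'zzz'), so the returned count and a subsequent
  // countRows should agree.
  //
  //   Table t = util.createTable(TableName.valueOf("testLoad"), fam1, 1);
  //   int loaded = util.loadTable(t, fam1);
  //   assertEquals(loaded, HBaseTestingUtil.countRows(t));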
  /**
   * A tracker for tracking and validating table rows generated with
   * {@link HBaseTestingUtil#loadTable(Table, byte[])}
   */
  public static class SeenRowTracker {
    int dim = 'z' - 'a' + 1;
    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
    byte[] startRow;
    byte[] stopRow;

    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
      this.startRow = startRow;
      this.stopRow = stopRow;
    }

    void reset() {
      for (byte[] row : ROWS) {
        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
      }
    }

    int i(byte b) {
      return b - FIRST_CHAR;
    }

    public void addRow(byte[] row) {
      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
    }

    /**
     * Validate that all the rows between startRow and stopRow are seen exactly once, and all
     * other rows none
     */
    public void validate() {
      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
            int count = seenRows[i(b1)][i(b2)][i(b3)];
            int expectedCount = 0;
            if (
              Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0
            ) {
              expectedCount = 1;
            }
            if (count != expectedCount) {
              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
                + "instead of " + expectedCount);
            }
          }
        }
      }
    }
  }
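
  // Usage sketch (hedged; the scan loop and "t" are hypothetical test code): track every row
  // returned by a bounded scan, then validate the expected half-open key range [startRow,
  // stopRow).
  //
  //   SeenRowTracker tracker = new SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
  //   Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("bbb"));
  //   try (ResultScanner scanner = t.getScanner(scan)) {
  //     for (Result r : scanner) {
  //       tracker.addRow(r.getRow());
  //     }
  //   }
  //   tracker.validate(); // throws RuntimeException on any miscounted row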
  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
    return loadRegion(r, f, false);
  }
  public int loadRegion(final Region r, final byte[] f) throws IOException {
    return loadRegion((HRegion) r, f);
  }
  /**
   * Load region with rows from 'aaa' to 'zzz'.
   * @param flush flush the cache if true
   * @return Count of rows loaded.
   */
  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
    byte[] k = new byte[3];
    int rowCount = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          k[0] = b1;
          k[1] = b2;
          k[2] = b3;
          Put put = new Put(k);
          put.setDurability(Durability.SKIP_WAL);
          put.addColumn(f, null, k);
          if (r.getWAL() == null) {
            put.setDurability(Durability.SKIP_WAL);
          }
          int preRowCount = rowCount;
          int pause = 10;
          int maxPause = 1000;
          while (rowCount == preRowCount) {
            try {
              r.put(put);
              rowCount++;
            } catch (RegionTooBusyException e) {
              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
              Threads.sleep(pause);
            }
          }
        }
      }
      if (flush) {
        r.flush(true);
      }
    }
    return rowCount;
  }
  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
    throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Put put = new Put(data);
      put.addColumn(f, null, data);
      t.put(put);
    }
  }
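
  // Usage sketch (hedged; "util" and "table" are hypothetical test code): loadNumericRows and
  // verifyNumericRows are designed as a pair, using the stringified integer as both row key and
  // cell value over the half-open range [startRow, endRow).
  //
  //   util.loadNumericRows(table, fam1, 0, 100);
  //   util.verifyNumericRows(table, fam1, 0, 100, 0 /* primary replica */);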
  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
    throws IOException {
    Random r = new Random();
    byte[] row = new byte[rowSize];
    for (int i = 0; i < totalRows; i++) {
      r.nextBytes(row);
      Put put = new Put(row);
      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
      t.put(put);
    }
  }
  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
    int replicaId) throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Get get = new Get(data);
      get.setReplicaId(replicaId);
      get.setConsistency(Consistency.TIMELINE);
      Result result = table.get(get);
      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength()));
    }
  }
  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
    throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow);
  }
  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
    throws IOException {
    verifyNumericRows(region, f, startRow, endRow, true);
  }
  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
  }
  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
    for (int i = startRow; i < endRow; i++) {
      String failMsg = "Failed verification of row :" + i;
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Result result = region.get(new Get(data));

      boolean hasResult = result != null && !result.isEmpty();
      assertEquals(failMsg + result, present, hasResult);
      if (!present) {
        continue;
      }

      assertTrue(failMsg, result.containsColumn(f, null));
      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
      Cell cell = result.getColumnLatestCell(f, null);
      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength()));
    }
  }
  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
    throws IOException {
    for (int i = startRow; i < endRow; i++) {
      byte[] data = Bytes.toBytes(String.valueOf(i));
      Delete delete = new Delete(data);
      delete.addFamily(f);
      t.delete(delete);
    }
  }
  /**
   * Return the number of rows in the given table.
   * @param table to count rows
   * @return count of rows
   */
  public static int countRows(final Table table) throws IOException {
    return countRows(table, new Scan());
  }
  public static int countRows(final Table table, final Scan scan) throws IOException {
    try (ResultScanner results = table.getScanner(scan)) {
      int count = 0;
      while (results.next() != null) {
        count++;
      }
      return count;
    }
  }
  public static int countRows(final Table table, final byte[]... families) throws IOException {
    Scan scan = new Scan();
    for (byte[] family : families) {
      scan.addFamily(family);
    }
    return countRows(table, scan);
  }
  /**
   * Return the number of rows in the given table.
   */
  public int countRows(final TableName tableName) throws IOException {
    try (Table table = getConnection().getTable(tableName)) {
      return countRows(table);
    }
  }
  public static int countRows(final Region region) throws IOException {
    return countRows(region, new Scan());
  }
  public static int countRows(final Region region, final Scan scan) throws IOException {
    try (InternalScanner scanner = region.getScanner(scan)) {
      return countRows(scanner);
    }
  }
  public static int countRows(final InternalScanner scanner) throws IOException {
    int scannedCount = 0;
    List<Cell> results = new ArrayList<>();
    boolean hasMore = true;
    while (hasMore) {
      hasMore = scanner.next(results);
      scannedCount += results.size();
      results.clear();
    }
    return scannedCount;
  }
  /**
   * Return an md5 digest of the entire contents of a table.
   */
  public String checksumRows(final Table table) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    try (ResultScanner results = table.getScanner(new Scan())) {
      for (Result res : results) {
        digest.update(res.getRow());
      }
    }
    return digest.toString();
  }
  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
  static {
    int i = 0;
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          ROWS[i][0] = b1;
          ROWS[i][1] = b2;
          ROWS[i][2] = b3;
          i++;
        }
      }
    }
  }
  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };
  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
  /**
   * Create rows in hbase:meta for regions of the specified table with the specified start keys.
   * The first startKey should be a 0 length byte array if you want to form a proper range of
   * regions.
   * @return list of region info for regions added to meta
   */
  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final TableDescriptor htd, byte[][] startKeys) throws IOException {
    try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
      Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
      List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
      MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
        TableState.State.ENABLED);
      // add custom ones
      for (int i = 0; i < startKeys.length; i++) {
        int j = (i + 1) % startKeys.length;
        RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
          .setEndKey(startKeys[j]).build();
        MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
        newRegions.add(hri);
      }
      return newRegions;
    }
  }
  /**
   * Create an unmanaged WAL. Be sure to close it when you're through.
   */
  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
    throws IOException {
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless I pass along via the conf.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
  }
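
  // Usage sketch (hedged; "util" and "desc" are hypothetical test code): a test that wants full
  // control of the WAL can create one here and hand it to createLocalHRegion(info, conf, desc,
  // wal); the caller then owns both and must close the region and then the WAL when done.
  //
  //   TableDescriptor desc = util.createTableDescriptor(TableName.valueOf("testWal"), fam1);
  //   RegionInfo info = RegionInfoBuilder.newBuilder(desc.getTableName()).build();
  //   WAL wal = HBaseTestingUtil.createWal(util.getConfiguration(), util.getDataTestDir(), info);
  //   HRegion region = util.createLocalHRegion(info, util.getConfiguration(), desc, wal);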
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
    final Configuration conf, final TableDescriptor htd) throws IOException {
    return createRegionAndWAL(info, rootDir, conf, htd, true);
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
    final Configuration conf, final TableDescriptor htd, BlockCache blockCache)
    throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setBlockCache(blockCache);
    region.initialize();
    return region;
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
    final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
    throws IOException {
    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
    region.setMobFileCache(mobFileCache);
    region.initialize();
    return region;
  }
  /**
   * Create a region with its own WAL. Be sure to call
   * {@link HBaseTestingUtil#closeRegionAndWAL(HRegion)} to clean up all resources.
   */
  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
    final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    WAL wal = createWal(conf, rootDir, info);
    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
  }
  /**
   * Find any other region server which is different from the one identified by parameter
   * @return another region server
   */
  public HRegionServer getOtherRegionServer(HRegionServer rs) {
    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
      if (!(rst.getRegionServer() == rs)) {
        return rst.getRegionServer();
      }
    }
    return null;
  }
  /**
   * Tool to get the reference to the region server object that holds the region of the specified
   * user table.
   * @param tableName user table to lookup in hbase:meta
   * @return region server that holds it, null if the row doesn't exist
   */
  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
    throws IOException, InterruptedException {
    List<RegionInfo> regions = getAdmin().getRegions(tableName);
    if (regions == null || regions.isEmpty()) {
      return null;
    }
    LOG.debug("Found " + regions.size() + " regions for table " + tableName);

    byte[] firstRegionName =
      regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));

    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
    while (retrier.shouldRetry()) {
      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
      if (index != -1) {
        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
      }
      // Came back -1. Region may not be online yet. Sleep a while.
      retrier.sleepUntilNextRetry();
    }
    return null;
  }
  /**
   * Starts a <code>MiniMRCluster</code> with a default number of <code>TaskTracker</code>'s.
   * @throws IOException When starting the cluster fails.
   */
  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
      "99.0");
    startMiniMapReduceCluster(2);
    return mrCluster;
  }
  /**
   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
   * internal static LOG_DIR variable.
   */
  private void forceChangeTaskLogDir() {
    Field logDirField;
    try {
      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
      logDirField.setAccessible(true);

      Field modifiersField = ReflectionUtils.getModifiersField();
      modifiersField.setAccessible(true);
      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);

      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException
      | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
  /**
   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a
   * different filesystem.
   * @param servers The number of <code>TaskTracker</code>'s to start.
   * @throws IOException When starting the cluster fails.
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    //// hadoop2 specific settings
    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    // we up the VM usable so that processes don't get killed.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
    // this avoids the problem by disabling speculative task execution in tests.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Yarn container runs in independent JVM. We need to pass the argument manually here if the
    // JDK version >= 17. Otherwise, the MiniMRCluster will fail.
    if (JVM.getJVMSpecVersion() >= 17) {
      String jvmOpts = conf.get("yarn.app.mapreduce.am.command-opts", "");
      conf.set("yarn.app.mapreduce.am.command-opts",
        jvmOpts + " --add-opens java.base/java.lang=ALL-UNNAMED");
    }

    // Allow the user to override FS URI for this map-reduce cluster to use.
    mrCluster =
      new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
        1, null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    // Hadoop MiniMR overwrites this while it should not
    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
    // necessary config properties here. YARN-129 required adding a few properties.
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // this for mrv2 support; mr1 ignores this
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
    if (mrJobHistoryWebappAddress != null) {
      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
    }
    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
    if (yarnRMWebappAddress != null) {
      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
    }
  }
  /**
   * Stops the previously started <code>MiniMRCluster</code>.
   */
  public void shutdownMiniMapReduceCluster() {
    if (mrCluster != null) {
      LOG.info("Stopping mini mapreduce cluster...");
      mrCluster.shutdown();
      mrCluster = null;
      LOG.info("Mini mapreduce cluster stopped");
    }
    // Restore configuration to point to local jobtracker
    conf.set("mapreduce.jobtracker.address", "local");
  }
  /**
   * Create a stubbed out RegionServerService, mainly for getting FS.
   */
  public RegionServerServices createMockRegionServerService() throws IOException {
    return createMockRegionServerService((ServerName) null);
  }
  /**
   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
   * TestTokenAuthentication
   */
  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
    throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
    rss.setFileSystem(getTestFileSystem());
    rss.setRpcServer(rpc);
    return rss;
  }
  /**
   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
   * TestOpenRegionHandler
   */
  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
    rss.setFileSystem(getTestFileSystem());
    return rss;
  }
  /**
   * Expire the Master's session
   */
  public void expireMasterSession() throws Exception {
    HMaster master = getMiniHBaseCluster().getMaster();
    expireSession(master.getZooKeeper(), false);
  }
  /**
   * Expire a region server's session
   * @param index which RS
   */
  public void expireRegionServerSession(int index) throws Exception {
    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
    expireSession(rs.getZooKeeper(), false);
    decrementMinRegionServerCount();
  }
  private void decrementMinRegionServerCount() {
    // decrement the count for this.conf, for newly spawned master
    // this.hbaseCluster shares this configuration too
    decrementMinRegionServerCount(getConfiguration());

    // each master thread keeps a copy of configuration
    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
      decrementMinRegionServerCount(master.getMaster().getConfiguration());
    }
  }
  private void decrementMinRegionServerCount(Configuration conf) {
    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    if (currentCount != -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
    }
  }
  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }
  /**
   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
   * http://hbase.apache.org/book.html#trouble.zookeeper
   * <p/>
   * There are issues when doing this:
   * <ol>
   * <li>http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html</li>
   * <li>https://issues.apache.org/jira/browse/ZOOKEEPER-1105</li>
   * </ol>
   * @param nodeZK      - the ZK watcher to expire
   * @param checkStatus - true to check if we can create a Table with the current configuration.
   */
  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    // so we create a first watcher to be sure that the
    // event was sent. We expect that if our watcher receives the event
    // other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    // we receive all the events, so we don't have to capture the event, just
    // closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event=" + watchedEvent);
      }
    }, sessionID, password);

    // Making it expire
    ZooKeeper newZK =
      new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);

    // ensure that we have connection to the server before closing down, otherwise
    // the close session event will be eaten out before we start CONNECTING state
    long start = EnvironmentEdgeManager.currentTime();
    while (
      newZK.getState() != States.CONNECTED && EnvironmentEdgeManager.currentTime() - start < 1000
    ) {
      Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      getConnection().getTable(TableName.META_TABLE_NAME).close();
    }
  }
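
  // Usage sketch (hedged; hypothetical test code): expiring a ZooKeeper session is how tests
  // simulate a master or region server losing its ephemeral znodes without killing the process.
  //
  //   util.expireMasterSession();                             // convenience wrapper above
  //   util.expireSession(util.getZooKeeperWatcher(), false);  // same mechanism, explicit watcher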
  /**
   * Get the Mini HBase cluster.
   * @return hbase cluster
   * @see #getHBaseClusterInterface()
   */
  public SingleProcessHBaseCluster getHBaseCluster() {
    return getMiniHBaseCluster();
  }
  /**
   * Returns the HBaseCluster instance.
   * <p>
   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
   * works on a mini cluster, then the specific method {@link #getMiniHBaseCluster()} can be used
   * instead w/o the need to type-cast.
   */
  public HBaseClusterInterface getHBaseClusterInterface() {
    // implementation note: we should rename this method as #getHBaseCluster(),
    // but this would require refactoring 90+ calls.
    return hbaseCluster;
  }
  /**
   * Resets the connections so that the next time getConnection() is called, a new connection is
   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
   * the connection is not valid anymore.
   * <p>
   * TODO: There should be a more coherent way of doing this. Unfortunately the way tests are
   * written, not all start() stop() calls go through this class. Most tests directly operate on
   * the underlying mini/local hbase cluster. That makes it difficult for this wrapper class to
   * maintain the connection state automatically. Cleaning this is a much bigger refactor.
   */
  public void invalidateConnection() throws IOException {
    closeConnection();
    // Update the master addresses if they changed.
    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
    final String masterConfAfter = getMiniHBaseCluster().getConf().get(HConstants.MASTER_ADDRS_KEY);
    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
      masterConfigBefore, masterConfAfter);
    conf.set(HConstants.MASTER_ADDRS_KEY,
      getMiniHBaseCluster().getConf().get(HConstants.MASTER_ADDRS_KEY));
  }
  /**
   * Get a shared Connection to the cluster. This method is thread safe.
   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
   */
  public Connection getConnection() throws IOException {
    return getAsyncConnection().toConnection();
  }
  /**
   * Get an assigned Connection to the cluster. This method is thread safe.
   * @param user assigned user
   * @return A Connection with the assigned user.
   */
  public Connection getConnection(User user) throws IOException {
    return getAsyncConnection(user).toConnection();
  }
  /**
   * Get a shared AsyncClusterConnection to the cluster. This method is thread safe.
   * @return An AsyncClusterConnection that can be shared. Don't close. Will be closed on shutdown
   *         of cluster.
   */
  public AsyncClusterConnection getAsyncConnection() throws IOException {
    try {
      return asyncConnection.updateAndGet(connection -> {
        if (connection == null) {
          try {
            User user = UserProvider.instantiate(conf).getCurrent();
            connection = getAsyncConnection(user);
          } catch (IOException ioe) {
            throw new UncheckedIOException("Failed to create connection", ioe);
          }
        }
        return connection;
      });
    } catch (UncheckedIOException exception) {
      throw exception.getCause();
    }
  }
  /**
   * Get an assigned AsyncClusterConnection to the cluster. This method is thread safe.
   * @param user assigned user
   * @return An AsyncClusterConnection with the assigned user.
   */
  public AsyncClusterConnection getAsyncConnection(User user) throws IOException {
    return ClusterConnectionFactory.createAsyncClusterConnection(conf, null, user);
  }
  public void closeConnection() throws IOException {
    if (hbaseAdmin != null) {
      Closeables.close(hbaseAdmin, true);
      hbaseAdmin = null;
    }
    AsyncClusterConnection asyncConnection = this.asyncConnection.getAndSet(null);
    if (asyncConnection != null) {
      Closeables.close(asyncConnection, true);
    }
  }
  /**
   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
   * it has no effect, it will be closed automatically when the cluster shuts down.
   */
  public Admin getAdmin() throws IOException {
    if (hbaseAdmin == null) {
      this.hbaseAdmin = getConnection().getAdmin();
    }
    return hbaseAdmin;
  }

  private Admin hbaseAdmin = null;
  /**
   * Returns an {@link Hbck} instance. Needs to be closed when done.
   */
  public Hbck getHbck() throws IOException {
    return getConnection().getHbck();
  }
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(String regionName) throws IOException {
    unassignRegion(Bytes.toBytes(regionName));
  }
  /**
   * Unassign the named region.
   * @param regionName The region to unassign.
   */
  public void unassignRegion(byte[] regionName) throws IOException {
    getAdmin().unassign(regionName);
  }
  /**
   * Closes the region containing the given row.
   * @param row   The row to find the containing region.
   * @param table The table to find the region.
   */
  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
    unassignRegionByRow(Bytes.toBytes(row), table);
  }
  /**
   * Closes the region containing the given row.
   * @param row   The row to find the containing region.
   * @param table The table to find the region.
   */
  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
    HRegionLocation hrl = table.getRegionLocation(row);
    unassignRegion(hrl.getRegion().getRegionName());
  }
  /**
   * Retrieves a splittable region randomly from tableName
   * @param tableName   name of table
   * @param maxAttempts maximum number of attempts, unlimited for value of -1
   * @return the HRegion chosen, null if none was found within limit of maxAttempts
   */
  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<>();
    int idx;
    int attempts = 0;
    do {
      regions = getHBaseCluster().getRegions(tableName);
      if (regCount != regions.size()) {
        // if there was region movement, clear attempted Set
        attempted.clear();
      }
      regCount = regions.size();
      // There are chances that before we get the region for the table from an RS the region may
      // be going for CLOSE. This may be because online schema change is enabled
      if (regCount > 0) {
        idx = random.nextInt(regCount);
        // if we have just tried this region, there is no need to try again
        if (!attempted.contains(idx)) {
          HRegion region = regions.get(idx);
          if (region.checkSplit().isPresent()) {
            return region;
          }
          attempted.add(idx);
        }
      }
      attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
  }
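
  // Usage sketch (hedged; "util" and "tableName" are hypothetical test code): pick a region
  // that is currently splittable and split its table through the admin API.
  //
  //   HRegion region = util.getSplittableRegion(tableName, 10);
  //   if (region != null) {
  //     util.getAdmin().split(region.getRegionInfo().getTable());
  //   }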
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }

  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }
  /**
   * Set the MiniDFSCluster
   * @param cluster     cluster to use
   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
   *                    it is set.
   * @throws IllegalStateException if the passed cluster is up when it is required to be down
   * @throws IOException           if the FileSystem could not be set from the passed dfs cluster
   */
  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
    throws IllegalStateException, IOException {
    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
    }
    this.dfsCluster = cluster;
    this.setFs();
  }
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
  /**
   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
   * (30 seconds).
   * @param table Table to wait on.
   */
  public void waitTableAvailable(TableName table) throws InterruptedException, IOException {
    waitTableAvailable(table.getName(), 30000);
  }
  public void waitTableAvailable(TableName table, long timeoutMillis)
    throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(table));
  }
  /**
   * Wait until all regions in a table have been assigned
   * @param table         Table to wait on.
   * @param timeoutMillis Timeout.
   */
  public void waitTableAvailable(byte[] table, long timeoutMillis)
    throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
  }
  public String explainTableAvailability(TableName tableName) throws IOException {
    StringBuilder msg =
      new StringBuilder(explainTableState(tableName, TableState.State.ENABLED)).append(", ");
    if (getHBaseCluster().getMaster().isAlive()) {
      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().getRegionAssignments();
      final List<Pair<RegionInfo, ServerName>> metaLocations =
        MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName);
      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
        RegionInfo hri = metaLocation.getFirst();
        ServerName sn = metaLocation.getSecond();
        if (!assignments.containsKey(hri)) {
          msg.append(", region ").append(hri)
            .append(" not assigned, but found in meta, it expected to be on ").append(sn);
        } else if (sn == null) {
          msg.append(", region ").append(hri).append(" assigned, but has no server in meta");
        } else if (!sn.equals(assignments.get(hri))) {
          msg.append(", region ").append(hri)
            .append(" assigned, but has different servers in meta and AM ( ").append(sn)
            .append(" <> ").append(assignments.get(hri)).append(")");
        }
      }
    }
    return msg.toString();
  }
  public String explainTableState(final TableName table, TableState.State state)
    throws IOException {
    TableState tableState = MetaTableAccessor.getTableState(getConnection(), table);
    if (tableState == null) {
      return "TableState in META: No table state in META for table " + table
        + ", last state in meta (including deleted is " + findLastTableState(table) + ")";
    } else if (!tableState.inStates(state)) {
      return "TableState in META: Not " + state + " state, but " + tableState;
    } else {
      return "TableState in META: OK";
    }
  }
  @Nullable
  public TableState findLastTableState(final TableName table) throws IOException {
    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        if (!Arrays.equals(r.getRow(), table.getName())) {
          return true;
        }
        TableState state = CatalogFamilyFormat.getTableState(r);
        if (state != null) {
          lastTableState.set(state);
        }
        return true;
      }
    };
    MetaTableAccessor.scanMeta(getConnection(), null, null,
      ClientMetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
    return lastTableState.get();
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned. Will timeout after default period (30 seconds). Tolerates
   * nonexistent table.
   * @param table the table to wait on.
   * @throws InterruptedException if interrupted while waiting
   * @throws IOException          if an IO problem is encountered
   */
  public void waitTableEnabled(TableName table) throws InterruptedException, IOException {
    waitTableEnabled(table, 30000);
  }
  /**
   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
   * regions have been all assigned.
   * @see #waitTableEnabled(TableName, long)
   * @param table         Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   */
  public void waitTableEnabled(byte[] table, long timeoutMillis)
    throws InterruptedException, IOException {
    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
  }
  public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException {
    waitFor(timeoutMillis, predicateTableEnabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'. Will
   * timeout after default period (30 seconds).
   * @param table Table to wait on.
   */
  public void waitTableDisabled(byte[] table) throws InterruptedException, IOException {
    waitTableDisabled(table, 30000);
  }
  public void waitTableDisabled(TableName table, long millisTimeout)
    throws InterruptedException, IOException {
    waitFor(millisTimeout, predicateTableDisabled(table));
  }
  /**
   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'.
   * @param table         Table to wait on.
   * @param timeoutMillis Time to wait on it being marked disabled.
   */
  public void waitTableDisabled(byte[] table, long timeoutMillis)
    throws InterruptedException, IOException {
    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
  }
  /**
   * Make sure that at least the specified number of region servers are running
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */
  public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
    boolean startedServer = false;
    SingleProcessHBaseCluster hbaseCluster = getMiniHBaseCluster();
    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
  /**
   * Make sure that at least the specified number of region servers are running. We don't count
   * the ones that are currently stopping or are stopped.
   * @param num minimum number of region servers that should be running
   * @return true if we started some servers
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws IOException {
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:" + hrs);
      } else {
        nonStoppedServers++;
      }
    }
    for (int i = nonStoppedServers; i < num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
  /**
   * This method clones the passed <code>c</code> configuration setting a new user into the clone.
   * Use it getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos.
   * @param c                     Initial configuration
   * @param differentiatingSuffix Suffix to differentiate this user from others.
   * @return A new configuration instance with a different user set into it.
   */
  public static User getDifferentUser(final Configuration c, final String differentiatingSuffix)
    throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
      return User.getCurrent();
    }
    // Else distributed filesystem. Make a new instance per daemon. Below
    // code is taken from the AppendTestUtil over in hdfs.
    String username = User.getCurrent().getName() + differentiatingSuffix;
    User user = User.createUserForTesting(c, username, new String[] { "supergroup" });
    return user;
  }
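
  // Usage sketch (hedged; "util" and the suffix are hypothetical test code): obtain a second
  // HDFS user so two "daemons" in one JVM don't share a cached DFSClient instance.
  //
  //   User user = HBaseTestingUtil.getDifferentUser(util.getConfiguration(), ".hrs");
  //   user.runAs(new PrivilegedExceptionAction<Void>() {
  //     @Override
  //     public Void run() throws Exception {
  //       FileSystem fs = FileSystem.get(util.getConfiguration());
  //       // use fs as the differentiated user
  //       return null;
  //     }
  //   });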
  public static NavigableSet<String> getAllOnlineRegions(SingleProcessHBaseCluster cluster)
    throws IOException {
    NavigableSet<String> online = new TreeSet<>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (RegionInfo region : ProtobufUtil
          .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // That's fine.
      }
    }
    return online;
  }
  /**
   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes
   * tests linger. Here is the exception you'll see:
   *
   * <pre>
   * 2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/wal.1276627923013 block
   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
   * blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683
   * failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
   * </pre>
   *
   * @param stream A DFSClient.DFSOutputStream.
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
    try {
      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz : clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      LOG.info("Could not set max recovery field", e);
    }
  }
  /**
   * Uses directly the assignment manager to assign the region, then waits until the specified
   * region has completed assignment.
   * @return true if the region is assigned, false otherwise.
   */
  public boolean assignRegion(final RegionInfo regionInfo)
    throws IOException, InterruptedException {
    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
    am.assign(regionInfo);
    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
  }
  /**
   * Move region to destination server and wait till region is completely moved and online.
   * @param destRegion region to move
   * @param destServer destination server of the region
   */
  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
    throws InterruptedException, IOException {
    HMaster master = getMiniHBaseCluster().getMaster();
    // TODO: Here we start the move. The move can take a while.
    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
    while (true) {
      ServerName serverName =
        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion);
      if (serverName != null && serverName.equals(destServer)) {
        assertRegionOnServer(destRegion, serverName, 2000);
        break;
      }
      Thread.sleep(10);
    }
  }
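  // Usage sketch (hypothetical): relocate a region to another live server. `hri` is a region of
  // the test table; the helpers named here (getRSForFirstRegionInTable, getOtherRegionServer)
  // are assumed to exist elsewhere in this utility.
  //
  //   HRegionServer src = getRSForFirstRegionInTable(tableName);
  //   ServerName dest = getOtherRegionServer(src).getServerName();
  //   moveRegionAndWait(hri, dest);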
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
   * configurable timeout value (default is 60 seconds). This means all regions have been
   * deployed, and the master has been informed and has updated hbase:meta with the regions'
   * deployed server.
   * @param tableName the table name
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
    waitUntilAllRegionsAssigned(tableName,
      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
  }
  /**
   * Wait until all system tables' regions get assigned.
   */
  public void waitUntilAllSystemRegionsAssigned() throws IOException {
    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
  }
  /**
   * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
   * timeout. This means all regions have been deployed, and the master has been informed and has
   * updated hbase:meta with the regions' deployed server.
   * @param tableName the table name
   * @param timeout timeout, in milliseconds
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
    throws IOException {
    if (!TableName.isMetaTableName(tableName)) {
      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " +
          timeout + "ms");
        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
          @Override
          public String explainFailure() throws IOException {
            return explainTableAvailability(tableName);
          }

          @Override
          public boolean evaluate() throws IOException {
            Scan scan = new Scan();
            scan.addFamily(HConstants.CATALOG_FAMILY);
            boolean tableFound = false;
            try (ResultScanner s = meta.getScanner(scan)) {
              for (Result r; (r = s.next()) != null;) {
                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
                RegionInfo info = RegionInfo.parseFromOrNull(b);
                if (info != null && info.getTable().equals(tableName)) {
                  // Get server hosting this region from catalog family. Return false if no server
                  // hosting this region, or if the server hosting this region was recently killed
                  // (for fault tolerance testing).
                  tableFound = true;
                  byte[] server =
                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                  if (server == null) {
                    return false;
                  } else {
                    byte[] startCode =
                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                    ServerName serverName =
                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + "," +
                        Bytes.toLong(startCode));
                    if (!getHBaseClusterInterface().isDistributedCluster() &&
                      getHBaseCluster().isKilledRS(serverName)) {
                      return false;
                    }
                  }
                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
                    return false;
                  }
                }
              }
            }
            if (!tableFound) {
              LOG.warn(
                "Didn't find the entries for table " + tableName + " in meta, already deleted?");
            }
            return tableFound;
          }
        });
      }
    }
    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
    // check from the master state if we are using a mini cluster
    if (!getHBaseClusterInterface().isDistributedCluster()) {
      // So, all regions are in the meta table but make sure master knows of the assignments before
      // returning -- sometimes this can lag.
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
    LOG.info("All regions for table " + tableName + " assigned.");
  }
  /**
   * Do a small get/scan against one store. This is required because store has no actual methods
   * of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store, Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
      // readpoint 0.
      0);

    List<Cell> result = new ArrayList<>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // verify that we are on the row we want:
      Cell kv = result.get(0);
      if (!CellUtil.matchingRows(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
  /**
   * Create region split keys between startKey and endKey.
   * @param numRegions the number of regions to be created; it has to be greater than 3.
   * @return resulting split keys
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
    assertTrue(numRegions > 3);
    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] result = new byte[tmpSplitKeys.length + 1][];
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
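  // Worked example (hypothetical): with numRegions = 5, Bytes.split produces
  // (numRegions - 3) + 2 = 4 keys including both endpoints; prepending the empty start key yields
  // one start key per region.
  //
  //   byte[][] startKeys = getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 5);
  //   assertEquals(5, startKeys.length);
  //   assertEquals(0, startKeys[0].length); // first region starts at EMPTY_BYTE_ARRAY
  //   assertTrue(Bytes.equals(Bytes.toBytes("aaa"), startKeys[1]));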
  /**
   * Do a small get/scan against one store. This is required because store has no actual methods
   * of querying itself, and relies on StoreScanner.
   */
  public static List<Cell> getFromStoreFile(HStore store, byte[] row, NavigableSet<byte[]> columns)
    throws IOException {
    Get get = new Get(row);
    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
    s.put(store.getColumnFamilyDescriptor().getName(), columns);

    return getFromStoreFile(store, get);
  }
  public static void assertKVListsEqual(String additionalMsg, final List<? extends Cell> expected,
    final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    int i = 0;
    while (i < minLen &&
      CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0) {
      ++i;
    }

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    if (eLen != aLen || i != minLen) {
      throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": " +
        safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " + safeGetAsStr(actual, i) +
        " (length " + aLen + ")" + additionalMsg);
    }
  }
  public static <T> String safeGetAsStr(List<T> lst, int i) {
    if (0 <= i && i < lst.size()) {
      return lst.get(i).toString();
    } else {
      return "<out_of_range>";
    }
  }
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" +
      conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":" +
      conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
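  // Usage sketch (hypothetical): the returned key has the form "quorum:clientPort:znodeParent",
  // e.g. "localhost:21818:/hbase", which is the form replication peer configs expect. Here
  // `utility2` (a second cluster's HBaseTestingUtil) and `admin1` (an Admin on the source
  // cluster) are assumed names.
  //
  //   ReplicationPeerConfig rpc =
  //     ReplicationPeerConfig.newBuilder().setClusterKey(utility2.getClusterKey()).build();
  //   admin1.addReplicationPeer("peer1", rpc);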
  /**
   * Creates a random table with the given parameters.
   */
  public Table createRandomTable(TableName tableName, final Collection<String> families,
    final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions,
    final int numRowsPerFlush) throws IOException, InterruptedException {
    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " +
      numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" +
      maxVersions + "\n");

    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final Table table = createTable(tableName, cfBytes, maxVersions,
      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
      Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(
          String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value =
              Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" +
                iCol + "_ts_" + ts + "_random_" + rand.nextLong());
            put.addColumn(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.addColumn(cf, qual, ts);
          } else {
            del.addColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          mutator.mutate(put);
        }

        if (!del.isEmpty()) {
          mutator.mutate(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      mutator.flush();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }
    mutator.close();

    return table;
  }
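  // Usage sketch (hypothetical): build a small multi-family table with several store files per
  // region, handy for compaction and scan tests. Content is deterministic for a given table name
  // because the Random is seeded from tableName.hashCode().
  //
  //   Table t = createRandomTable(TableName.valueOf("randomTest"),
  //     Arrays.asList("cf1", "cf2"), 3 /* maxVersions */, 10 /* colsPerRow */,
  //     2 /* flushes */, 4 /* regions */, 100 /* rowsPerFlush */);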
  public static int randomFreePort() {
    return HBaseCommonTestingUtil.randomFreePort();
  }
  public static String randomMultiCastAddress() {
    return "226.1.1." + random.nextInt(254);
  }
  public static void waitForHostPort(String host, int port) throws IOException {
    final int maxTimeMs = 10000;
    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
    IOException savedException = null;
    LOG.info("Waiting for server at " + host + ":" + port);
    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
      try {
        Socket sock = new Socket(InetAddress.getByName(host), port);
        sock.close();
        savedException = null;
        LOG.info("Server at " + host + ":" + port + " is available");
        break;
      } catch (UnknownHostException e) {
        throw new IOException("Failed to look up " + host, e);
      } catch (IOException e) {
        savedException = e;
      }
      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
    }

    if (savedException != null) {
      throw savedException;
    }
  }
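  // Usage sketch (hypothetical): block until an external daemon started by a test accepts TCP
  // connections, rethrowing the last connect exception after ~10s of retries. `thriftServerPort`
  // is an assumed variable.
  //
  //   waitForHostPort("127.0.0.1", thriftServerPort);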
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding)
    throws IOException {
    return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression,
      dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT);
  }
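  // Usage sketch (hypothetical, mirroring what the load-test tools do): pre-split so load spreads
  // evenly from the start instead of waiting for organic region splits.
  //
  //   int regions = createPreSplitLoadTestTable(TEST_UTIL.getConfiguration(),
  //     TableName.valueOf("loadtest"), Bytes.toBytes("test_cf"), Algorithm.NONE,
  //     DataBlockEncoding.NONE);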
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding,
    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setDurability(durability);
    builder.setRegionReplication(regionReplication);
    ColumnFamilyDescriptorBuilder cfBuilder =
      ColumnFamilyDescriptorBuilder.newBuilder(columnFamily);
    cfBuilder.setDataBlockEncoding(dataBlockEncoding);
    cfBuilder.setCompressionType(compression);
    return createPreSplitLoadTestTable(conf, builder.build(), cfBuilder.build(),
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
    byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding,
    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    builder.setDurability(durability);
    builder.setRegionReplication(regionReplication);
    ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
    for (int i = 0; i < columnFamilies.length; i++) {
      ColumnFamilyDescriptorBuilder cfBuilder =
        ColumnFamilyDescriptorBuilder.newBuilder(columnFamilies[i]);
      cfBuilder.setDataBlockEncoding(dataBlockEncoding);
      cfBuilder.setCompressionType(compression);
      hcds[i] = cfBuilder.build();
    }
    return createPreSplitLoadTestTable(conf, builder.build(), hcds, numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
    ColumnFamilyDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
    ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd },
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
    ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(),
      numRegionsPerServer);
  }
  /**
   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
   * continues.
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td,
    ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer)
    throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
    for (ColumnFamilyDescriptor cd : cds) {
      if (!td.hasColumnFamily(cd.getName())) {
        builder.setColumnFamily(cd);
      }
    }
    td = builder.build();
    int totalNumberOfRegions = 0;
    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
    Admin admin = unmanagedConnection.getAdmin();

    try {
      // Create a table with pre-split regions. The number of splits is set as:
      // (region servers * regions per region server).
      int numberOfServers = admin.getRegionServers().size();
      if (numberOfServers == 0) {
        throw new IllegalStateException("No live regionservers");
      }

      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
      LOG.info(
        "Number of live regionservers: " + numberOfServers + ", " + "pre-splitting table into " +
          totalNumberOfRegions + " regions " + "(regions per server: " + numRegionsPerServer + ")");

      byte[][] splits = splitter.split(totalNumberOfRegions);

      admin.createTable(td, splits);
    } catch (MasterNotRunningException e) {
      LOG.error("Master not running", e);
      throw new IOException(e);
    } catch (TableExistsException e) {
      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
    } finally {
      admin.close();
      unmanagedConnection.close();
    }
    return totalNumberOfRegions;
  }
  public static int getMetaRSPort(Connection connection) throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
    }
  }
  /**
   * Due to async racing issue, a region may not be in the online region list of a region server
   * yet, after the assignment znode is deleted and the new assignment is recorded in master.
   */
  public void assertRegionOnServer(final RegionInfo hri, final ServerName server,
    final long timeout) throws IOException, InterruptedException {
    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
        return;
      }
      long now = EnvironmentEdgeManager.currentTime();
      if (now > timeoutTime) {
        break;
      }
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
  }
  /**
   * Check to make sure the region is open on the specified region server, but not on any other
   * one.
   */
  public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
    final long timeout) throws IOException, InterruptedException {
    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
    while (true) {
      List<RegionInfo> regions = getAdmin().getRegions(server);
      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            continue;
          }
          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
          for (HRegion r : hrs) {
            assertTrue("Region should not be double assigned",
              r.getRegionInfo().getRegionId() != hri.getRegionId());
          }
        }
        return; // good, we are happy
      }
      long now = EnvironmentEdgeManager.currentTime();
      if (now > timeoutTime) {
        break;
      }
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
  }
  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
    BlockCache blockCache) throws IOException {
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
  }
  public static void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
  /**
   * Returns a {@link Predicate} for checking that there are no regions in transition in master.
   */
  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        final RegionStates regionStates =
          getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
        return "found in transition: " + regionStates.getRegionsInTransition().toString();
      }

      @Override
      public boolean evaluate() throws IOException {
        HMaster master = getMiniHBaseCluster().getMaster();
        if (master == null) {
          return false;
        }
        AssignmentManager am = master.getAssignmentManager();
        if (am == null) {
          return false;
        }
        return !am.hasRegionsInTransition();
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is enabled.
   */
  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.ENABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
      }
    };
  }
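  // Usage sketch (hypothetical): these predicates compose with waitFor(...), which polls until
  // the predicate evaluates true or the timeout elapses, and uses explainFailure() in the error
  // message when it gives up.
  //
  //   waitFor(30_000, predicateTableEnabled(tableName));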
  /**
   * Returns a {@link Predicate} for checking that table is disabled.
   */
  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableState(tableName, TableState.State.DISABLED);
      }

      @Override
      public boolean evaluate() throws IOException {
        return getAdmin().isTableDisabled(tableName);
      }
    };
  }
  /**
   * Returns a {@link Predicate} for checking that table is available.
   */
  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
    return new ExplainingPredicate<IOException>() {
      @Override
      public String explainFailure() throws IOException {
        return explainTableAvailability(tableName);
      }

      @Override
      public boolean evaluate() throws IOException {
        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
        if (tableAvailable) {
          try (Table table = getConnection().getTable(tableName)) {
            TableDescriptor htd = table.getDescriptor();
            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
              .getAllRegionLocations()) {
              Scan scan = new Scan().withStartRow(loc.getRegion().getStartKey())
                .withStopRow(loc.getRegion().getEndKey()).setOneRowLimit()
                .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
              for (byte[] family : htd.getColumnFamilyNames()) {
                scan.addFamily(family);
              }
              try (ResultScanner scanner = table.getScanner(scan)) {
                scanner.next();
              }
            }
          }
        }
        return tableAvailable;
      }
    };
  }
  /**
   * Wait until no regions in transition.
   * @param timeout How long to wait.
   */
  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
    waitFor(timeout, predicateNoRegionsInTransition());
  }
  /**
   * Wait until no regions in transition. (time limit 15min)
   */
  public void waitUntilNoRegionsInTransition() throws IOException {
    waitUntilNoRegionsInTransition(15 * 60000);
  }
  /**
   * Wait until labels are ready in VisibilityLabelsCache.
   */
  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {

      @Override
      public boolean evaluate() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return false;
          }
        }
        return true;
      }

      @Override
      public String explainFailure() {
        for (String label : labels) {
          if (labelsCache.getLabelOrdinal(label) == 0) {
            return label + " is not available yet";
          }
        }
        return "";
      }
    });
  }
  /**
   * Create a set of column descriptors with the combination of compression, encoding, bloom
   * codecs available.
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }

  /**
   * Create a set of column descriptors with the combination of compression, encoding, bloom
   * codecs available.
   * @param prefix family names prefix
   * @return the list of column descriptors
   */
  public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
        for (BloomType bloomType : BloomType.values()) {
          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
          ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
          columnFamilyDescriptorBuilder.setCompressionType(compressionType);
          columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
          columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
          columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
          familyId++;
        }
      }
    }
    return columnFamilyDescriptors;
  }
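  // Usage sketch (hypothetical): exercise every supported compression x encoding x bloom
  // combination in a single table; the generated family names intentionally contain unusual
  // characters.
  //
  //   TableDescriptorBuilder b = TableDescriptorBuilder.newBuilder(tableName);
  //   generateColumnDescriptors().forEach(b::setColumnFamily);
  //   getAdmin().createTable(b.build());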
  /**
   * Get supported compression algorithms.
   * @return supported compression algorithms.
   */
  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
      try {
        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
        algo.getCompressor();
        supportedAlgos.add(algo);
      } catch (Throwable t) {
        // this algo is not available
      }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
  }
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan().withStartRow(row);
    scan.setReadType(ReadType.PREAD);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
    }
  }
  private boolean isTargetTable(final byte[] inRow, Cell c) {
    String inputRowString = Bytes.toString(inRow);
    int i = inputRowString.indexOf(HConstants.DELIMITER);
    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
    int o = outputRowString.indexOf(HConstants.DELIMITER);
    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
  }
  /**
   * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
   * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
   * kerby KDC server and utility for using it,
   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
   * less baggage. It came in with HBASE-5291.
   */
  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
    Properties conf = MiniKdc.createConf();
    conf.put(MiniKdc.DEBUG, true);
    MiniKdc kdc = null;
    File dir = null;
    // There is time lag between selecting a port and trying to bind with it. It's possible that
    // another service captures the port in between, which'll result in BindException.
    boolean bindException;
    int numTries = 0;
    do {
      try {
        bindException = false;
        dir = new File(getDataTestDir("kdc").toUri().getPath());
        kdc = new MiniKdc(conf, dir);
        kdc.start();
      } catch (BindException e) {
        FileUtils.deleteDirectory(dir); // clean directory
        numTries++;
        if (numTries == 3) {
          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
          throw e;
        }
        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
        bindException = true;
      }
    } while (bindException);
    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
    return kdc;
  }
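  // Usage sketch (hypothetical secure-test setup): start the KDC, create test principals into the
  // keytab, then point security config at them. The principal names here are illustrative only.
  //
  //   File keytab = new File(getDataTestDir("keytab").toUri().getPath());
  //   MiniKdc kdc = setupMiniKdc(keytab);
  //   kdc.createPrincipal(keytab, "hbase/localhost", "HTTP/localhost");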
  public int getNumHFiles(final TableName tableName, final byte[] family) {
    int numHFiles = 0;
    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
    }
    return numHFiles;
  }
  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
    final byte[] family) {
    int numHFiles = 0;
    for (Region region : rs.getRegions(tableName)) {
      numHFiles += region.getStore(family).getStorefilesCount();
    }
    return numHFiles;
  }
  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
    assertEquals(ltdFamilies.size(), rtdFamilies.size());
    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(),
        it2 = rtdFamilies.iterator(); it.hasNext();) {
      assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
    }
  }
  /**
   * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between
   * invocations.
   */
  public static void await(final long sleepMillis, final BooleanSupplier condition)
    throws InterruptedException {
    try {
      while (!condition.getAsBoolean()) {
        Thread.sleep(sleepMillis);
      }
    } catch (RuntimeException e) {
      if (e.getCause() instanceof AssertionError) {
        throw (AssertionError) e.getCause();