HBASE-26582 Prune use of Random and SecureRandom objects (#4118)
hbase.git: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (blob e5722d53bcb13312dee49142ce23fdae9343d29e)
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
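
/**
 * End to end tests of region splitting: verifies that a store holding an open reference file
 * reports that it cannot be split (HBASE-20940), and that clients always see a consistent set of
 * region boundaries in hbase:meta while splits are running.
 */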
@Category(LargeTests.class)
public class TestEndToEndSplitTransaction {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestEndToEndSplitTransaction.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestEndToEndSplitTransaction.class);
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final Configuration CONF = TEST_UTIL.getConfiguration();

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void beforeAllTests() throws Exception {
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void afterAllTests() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Test for HBASE-20940. This test splits the region and opens a reference over a store file.
   * Once a store file has any open reference, it makes sure that the region can't be split.
   */
  @Test
  public void testCanSplitJustAfterASplit() throws Exception {
    LOG.info("Starting testCanSplitJustAfterASplit");
    byte[] fam = Bytes.toBytes("cf_split");

    CompactSplit compactSplit =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread();
    TableName tableName = TableName.valueOf("CanSplitTable");
    Table source = TEST_UTIL.getConnection().getTable(tableName);
    Admin admin = TEST_UTIL.getAdmin();
    // set a large min compaction file count to avoid compaction just after splitting.
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
    Map<String, StoreFileReader> scanner = Maps.newHashMap();
    try {
      admin.createTable(htd);
      TEST_UTIL.loadTable(source, fam);
      compactSplit.setCompactionsEnabled(false);
      admin.split(tableName);
      TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2);
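
      // Each daughter now holds a reference file pointing at the parent's store file. Open a
      // store file scanner on it and keep the reader so the reference stays in use.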
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      regions.stream()
        .forEach(r -> r.getStores().get(0).getStorefiles().stream()
          .filter(s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName()))
          .forEach(sf -> {
            StoreFileReader reader = ((HStoreFile) sf).getReader();
            reader.getStoreFileScanner(true, false, false, 0, 0, false);
            scanner.put(r.getRegionInfo().getEncodedName(), reader);
            LOG.info("Got reference to file = " + sf.getPath() + ", for region = " +
              r.getRegionInfo().getEncodedName());
          }));
      assertTrue("Regions did not split properly", regions.size() > 1);
      assertTrue("Could not get reference to any of the store files", scanner.size() > 1);
      compactSplit.setCompactionsEnabled(true);
      for (HRegion region : regions) {
        region.compact(true);
      }
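
      // The open readers keep the reference files around, so those stores must still report that
      // they cannot be split.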
      regions.stream()
        .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName()))
        .forEach(r -> assertFalse("Contains an open file reference which can be split",
          r.getStores().get(0).canSplit()));
    } finally {
      scanner.values().forEach(s -> {
        try {
          s.close(true);
        } catch (IOException ioe) {
          LOG.error("Failed while closing store file", ioe);
        }
      });
      scanner.clear();
      Closeables.close(source, true);
      if (!compactSplit.isCompactionsEnabled()) {
        compactSplit.setCompactionsEnabled(true);
      }
      TEST_UTIL.deleteTableIfAny(tableName);
    }
  }

  /**
   * Tests that the client sees meta table changes as atomic during splits.
   */
  @Test
  public void testFromClientSideWhileSplitting() throws Throwable {
    LOG.info("Starting testFromClientSideWhileSplitting");
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[] FAMILY = Bytes.toBytes("family");

    // SplitTransaction will update the meta table by offlining the parent region, and adding info
    // for daughters.
    Table table = TEST_UTIL.createTable(tableName, FAMILY);
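
    // RegionSplitter repeatedly splits random regions of the table while the RegionChecker chore
    // verifies on each run that the region boundaries visible to clients contain no holes.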
    Stoppable stopper = new StoppableImplementation();
    RegionSplitter regionSplitter = new RegionSplitter(table);
    RegionChecker regionChecker = new RegionChecker(CONF, stopper, tableName);
    final ChoreService choreService = new ChoreService("TEST_SERVER");

    choreService.scheduleChore(regionChecker);
    regionSplitter.start();

    // wait until the splitter is finished
    regionSplitter.join();
    stopper.stop(null);

    if (regionChecker.ex != null) {
      throw new AssertionError("regionChecker", regionChecker.ex);
    }

    if (regionSplitter.ex != null) {
      throw new AssertionError("regionSplitter", regionSplitter.ex);
    }

    // one final check
    regionChecker.verify();
  }
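
  /**
   * Thread that repeatedly picks a random region of the table, loads some rows into it and splits
   * it at the midpoint of its key range, waiting for the daughters after each split. Any failure
   * is recorded in {@link #ex} so the test can rethrow it.
   */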
  static class RegionSplitter extends Thread {
    final Connection connection;
    Throwable ex;
    Table table;
    TableName tableName;
    byte[] family;
    Admin admin;
    HRegionServer rs;

    RegionSplitter(Table table) throws IOException {
      this.table = table;
      this.tableName = table.getName();
      this.family = table.getDescriptor().getColumnFamilies()[0].getName();
      admin = TEST_UTIL.getAdmin();
      rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
      connection = TEST_UTIL.getConnection();
    }

    @Override
    public void run() {
      try {
        Random random = ThreadLocalRandom.current();
        for (int i = 0; i < 5; i++) {
          List<RegionInfo> regions = MetaTableAccessor.getTableRegions(connection, tableName, true);
          if (regions.isEmpty()) {
            continue;
          }
          int regionIndex = random.nextInt(regions.size());

          // pick a random region and split it into two
          RegionInfo region = Iterators.get(regions.iterator(), regionIndex);

          // pick the mid split point
          int start = 0, end = Integer.MAX_VALUE;
          if (region.getStartKey().length > 0) {
            start = Bytes.toInt(region.getStartKey());
          }
          if (region.getEndKey().length > 0) {
            end = Bytes.toInt(region.getEndKey());
          }
          int mid = start + ((end - start) / 2);
          byte[] splitPoint = Bytes.toBytes(mid);

          // put some rows to the regions
          addData(start);
          addData(mid);

          flushAndBlockUntilDone(admin, rs, region.getRegionName());
          compactAndBlockUntilDone(admin, rs, region.getRegionName());

          log("Initiating region split for:" + region.getRegionNameAsString());
          try {
            admin.splitRegionAsync(region.getRegionName(), splitPoint).get();
            // wait until the split is complete
            blockUntilRegionSplit(CONF, 50000, region.getRegionName(), true);
          } catch (NotServingRegionException ex) {
            // ignore
          }
        }
      } catch (Throwable ex) {
        this.ex = ex;
      }
    }

    void addData(int start) throws IOException {
      List<Put> puts = new ArrayList<>();
      for (int i = start; i < start + 100; i++) {
        Put put = new Put(Bytes.toBytes(i));
        put.addColumn(family, family, Bytes.toBytes(i));
        puts.add(put);
      }
      table.put(puts);
    }
  }

  /**
   * Checks regions using MetaTableAccessor and HTable methods.
   */
  static class RegionChecker extends ScheduledChore {
    Connection connection;
    Configuration conf;
    TableName tableName;
    Throwable ex;

    RegionChecker(Configuration conf, Stoppable stopper, TableName tableName) throws IOException {
      super("RegionChecker", stopper, 100);
      this.conf = conf;
      this.tableName = tableName;

      this.connection = ConnectionFactory.createConnection(conf);
    }

    /** verify region boundaries obtained from MetaScanner */
    void verifyRegionsUsingMetaTableAccessor() throws Exception {
      List<RegionInfo> regionList = MetaTableAccessor.getTableRegions(connection, tableName, true);
      verifyTableRegions(regionList.stream()
        .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
      regionList = MetaTableAccessor.getAllRegions(connection, true);
      verifyTableRegions(regionList.stream()
        .collect(Collectors.toCollection(() -> new TreeSet<>(RegionInfo.COMPARATOR))));
    }

    /** verify region boundaries obtained from HTable.getStartEndKeys() */
    void verifyRegionsUsingHTable() throws IOException {
      try (RegionLocator rl = connection.getRegionLocator(tableName)) {
        Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
        verifyStartEndKeys(keys);

        Set<RegionInfo> regions = new TreeSet<>(RegionInfo.COMPARATOR);
        for (HRegionLocation loc : rl.getAllRegionLocations()) {
          regions.add(loc.getRegion());
        }
        verifyTableRegions(regions);
      }
    }

    void verify() throws Exception {
      verifyRegionsUsingMetaTableAccessor();
      verifyRegionsUsingHTable();
    }

    void verifyTableRegions(Set<RegionInfo> regions) {
      log("Verifying " + regions.size() + " regions: " + regions);

      byte[][] startKeys = new byte[regions.size()][];
      byte[][] endKeys = new byte[regions.size()][];

      int i = 0;
      for (RegionInfo region : regions) {
        startKeys[i] = region.getStartKey();
        endKeys[i] = region.getEndKey();
        i++;
      }

      Pair<byte[][], byte[][]> keys = new Pair<>(startKeys, endKeys);
      verifyStartEndKeys(keys);
    }

    void verifyStartEndKeys(Pair<byte[][], byte[][]> keys) {
      byte[][] startKeys = keys.getFirst();
      byte[][] endKeys = keys.getSecond();
      assertEquals(startKeys.length, endKeys.length);
      assertTrue("Found 0 regions for the table", startKeys.length > 0);

      assertArrayEquals("Start key for the first region is not byte[0]", HConstants.EMPTY_START_ROW,
        startKeys[0]);
      byte[] prevEndKey = HConstants.EMPTY_START_ROW;

      // ensure that we do not have any gaps
      for (int i = 0; i < startKeys.length; i++) {
        assertArrayEquals(
          "Hole in hbase:meta is detected. prevEndKey=" + Bytes.toStringBinary(prevEndKey) +
            ", regionStartKey=" + Bytes.toStringBinary(startKeys[i]),
          prevEndKey, startKeys[i]);
        prevEndKey = endKeys[i];
      }
      assertArrayEquals("End key for the last region is not byte[0]", HConstants.EMPTY_END_ROW,
        endKeys[endKeys.length - 1]);
    }
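
    /** Runs the verification on every chore tick; on failure, records the exception and stops. */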
    @Override
    protected void chore() {
      try {
        verify();
      } catch (Throwable ex) {
        this.ex = ex;
        getStopper().stop("caught exception");
      }
    }
  }

  public static void log(String msg) {
    LOG.info(msg);
  }

  /* some utility methods for split tests */
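
  /** Flushes the given region and blocks until its memstore size drops back to zero. */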
  public static void flushAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
    throws IOException, InterruptedException {
    log("flushing region: " + Bytes.toStringBinary(regionName));
    admin.flushRegion(regionName);
    log("blocking until flush is complete: " + Bytes.toStringBinary(regionName));
    Threads.sleepWithoutInterrupt(500);
    while (rs.getOnlineRegion(regionName).getMemStoreDataSize() > 0) {
      Threads.sleep(50);
    }
  }
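
  /**
   * Waits until the region is online, major compacts it and blocks until every store in the
   * region has at most one store file.
   */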
  public static void compactAndBlockUntilDone(Admin admin, HRegionServer rs, byte[] regionName)
    throws IOException, InterruptedException {
    log("Compacting region: " + Bytes.toStringBinary(regionName));
    // Wait till it's online before we compact, else it comes back with NoServerForRegionException
    try {
      TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return rs.getServerName().equals(
            MetaTableAccessor.getRegionLocation(admin.getConnection(), regionName).getServerName());
        }
      });
    } catch (Exception e) {
      throw new IOException(e);
    }
    admin.majorCompactRegion(regionName);
    log("blocking until compaction is complete: " + Bytes.toStringBinary(regionName));
    Threads.sleepWithoutInterrupt(500);
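    // loop until every store in the region has at most one store file left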
    outer: for (;;) {
      for (Store store : rs.getOnlineRegion(regionName).getStores()) {
        if (store.getStorefilesCount() > 1) {
          Threads.sleep(50);
          continue outer;
        }
      }
      break;
    }
  }

  /**
   * Blocks until the region split is complete in hbase:meta and the region server opens the
   * daughters.
   */
  public static void blockUntilRegionSplit(Configuration conf, long timeout,
    final byte[] regionName, boolean waitForDaughters) throws IOException, InterruptedException {
    long start = EnvironmentEdgeManager.currentTime();
    log("blocking until region is split:" + Bytes.toStringBinary(regionName));
    RegionInfo daughterA = null, daughterB = null;
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
      Result result = null;
      RegionInfo region = null;
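      // poll the parent's row in hbase:meta until it is marked as a split parent and lists both
      // daughter regions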
      while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
        result = metaTable.get(new Get(regionName));
        if (result == null) {
          break;
        }

        region = CatalogFamilyFormat.getRegionInfo(result);
        if (region.isSplitParent()) {
          log("found parent region: " + region.toString());
          PairOfSameType<RegionInfo> pair = MetaTableAccessor.getDaughterRegions(result);
          daughterA = pair.getFirst();
          daughterB = pair.getSecond();
          break;
        }
        Threads.sleep(100);
      }
      if (daughterA == null || daughterB == null) {
        throw new IOException("Failed to get daughters, daughterA=" + daughterA + ", daughterB=" +
          daughterB + ", timeout=" + timeout + ", result=" + result + ", regionName=" +
          Bytes.toString(regionName) + ", region=" + region);
      }

      // if we are here, this means the region split is complete or timed out
      if (waitForDaughters) {
        long rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
        blockUntilRegionIsInMeta(conn, rem, daughterA);

        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
        blockUntilRegionIsInMeta(conn, rem, daughterB);

        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
        blockUntilRegionIsOpened(conf, rem, daughterA);

        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
        blockUntilRegionIsOpened(conf, rem, daughterB);

        // Compacting the new regions to make sure references can be cleaned up
        compactAndBlockUntilDone(TEST_UTIL.getAdmin(),
          TEST_UTIL.getMiniHBaseCluster().getRegionServer(0), daughterA.getRegionName());
        compactAndBlockUntilDone(TEST_UTIL.getAdmin(),
          TEST_UTIL.getMiniHBaseCluster().getRegionServer(0), daughterB.getRegionName());

        removeCompactedFiles(conn, timeout, daughterA);
        removeCompactedFiles(conn, timeout, daughterB);
      }
    }
  }
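
  /**
   * Asks the first store of each online region of the given region's table to close and archive
   * its compacted files.
   */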
  public static void removeCompactedFiles(Connection conn, long timeout, RegionInfo hri)
    throws IOException, InterruptedException {
    log("remove compacted files for : " + hri.getRegionNameAsString());
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(hri.getTable());
    regions.stream().forEach(r -> {
      try {
        r.getStores().get(0).closeAndArchiveCompactedFiles();
      } catch (IOException ioe) {
        LOG.error("failed in removing compacted file", ioe);
      }
    });
  }
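
  /**
   * Blocks until hbase:meta has a location for the given region and the region is no longer
   * marked offline, or the timeout expires.
   */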
  public static void blockUntilRegionIsInMeta(Connection conn, long timeout, RegionInfo hri)
    throws IOException, InterruptedException {
    log("blocking until region is in META: " + hri.getRegionNameAsString());
    long start = EnvironmentEdgeManager.currentTime();
    while (EnvironmentEdgeManager.currentTime() - start < timeout) {
      HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri);
      if (loc != null && !loc.getRegion().isOffline()) {
        log("found region in META: " + hri.getRegionNameAsString());
        break;
      }
      Threads.sleep(100);
    }
  }
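
  /**
   * Blocks until a Get against the region's start key succeeds, i.e. the region is open for
   * reading, or the timeout expires.
   */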
  public static void blockUntilRegionIsOpened(Configuration conf, long timeout, RegionInfo hri)
    throws IOException, InterruptedException {
    log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
    long start = EnvironmentEdgeManager.currentTime();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(hri.getTable())) {
      byte[] row = hri.getStartKey();
      // Check for null/empty row. If we find one, use a key that is likely to be in first region.
      if (row == null || row.length <= 0) {
        row = new byte[] { '0' };
      }
      Get get = new Get(row);
      while (EnvironmentEdgeManager.currentTime() - start < timeout) {
        try {
          table.get(get);
          break;
        } catch (IOException ex) {
          // wait some more
        }
        Threads.sleep(100);