2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 package org
.apache
.hadoop
.hbase
.client
;
20 import java
.util
.List
;
21 import java
.util
.regex
.Pattern
;
22 import org
.apache
.hadoop
.conf
.Configuration
;
23 import org
.apache
.hadoop
.fs
.FileSystem
;
24 import org
.apache
.hadoop
.fs
.Path
;
25 import org
.apache
.hadoop
.hbase
.HBaseClassTestRule
;
26 import org
.apache
.hadoop
.hbase
.HBaseTestingUtil
;
27 import org
.apache
.hadoop
.hbase
.HConstants
;
28 import org
.apache
.hadoop
.hbase
.TableName
;
29 import org
.apache
.hadoop
.hbase
.master
.snapshot
.SnapshotManager
;
30 import org
.apache
.hadoop
.hbase
.regionserver
.ConstantSizeRegionSplitPolicy
;
31 import org
.apache
.hadoop
.hbase
.snapshot
.SnapshotTestingUtils
;
32 import org
.apache
.hadoop
.hbase
.testclassification
.ClientTests
;
33 import org
.apache
.hadoop
.hbase
.testclassification
.LargeTests
;
34 import org
.apache
.hadoop
.hbase
.util
.Bytes
;
35 import org
.apache
.hadoop
.hbase
.util
.EnvironmentEdgeManager
;
36 import org
.apache
.hadoop
.hbase
.util
.Threads
;
37 import org
.junit
.After
;
38 import org
.junit
.AfterClass
;
39 import org
.junit
.Assert
;
40 import org
.junit
.Before
;
41 import org
.junit
.BeforeClass
;
42 import org
.junit
.ClassRule
;
43 import org
.junit
.Rule
;
44 import org
.junit
.Test
;
45 import org
.junit
.experimental
.categories
.Category
;
46 import org
.junit
.rules
.TestName
;
47 import org
.slf4j
.Logger
;
48 import org
.slf4j
.LoggerFactory
;
51 * Test to verify that the cloned table is independent of the table from which it was cloned
53 @Category({LargeTests
.class, ClientTests
.class})
54 public class TestSnapshotCloneIndependence
{
57 public static final HBaseClassTestRule CLASS_RULE
=
58 HBaseClassTestRule
.forClass(TestSnapshotCloneIndependence
.class);
60 private static final Logger LOG
= LoggerFactory
.getLogger(TestSnapshotCloneIndependence
.class);
63 public TestName testName
= new TestName();
65 protected static final HBaseTestingUtil UTIL
= new HBaseTestingUtil();
67 protected static final int NUM_RS
= 2;
68 private static final String TEST_FAM_STR
= "fam";
69 protected static final byte[] TEST_FAM
= Bytes
.toBytes(TEST_FAM_STR
);
70 private static final int CLEANER_INTERVAL
= 100;
72 private FileSystem fs
;
75 private TableName originalTableName
;
76 private Table originalTable
;
77 private TableName cloneTableName
;
78 private int countOriginalTable
;
79 String snapshotNameAsString
;
83 * Setup the config for the cluster and start it
86 public static void setupCluster() throws Exception
{
87 setupConf(UTIL
.getConfiguration());
88 UTIL
.startMiniCluster(NUM_RS
);
91 static void setupConf(Configuration conf
) {
92 // Up the handlers; this test needs more than usual.
93 conf
.setInt(HConstants
.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT
, 15);
94 // enable snapshot support
95 conf
.setBoolean(SnapshotManager
.HBASE_SNAPSHOT_ENABLED
, true);
96 // change the flush size to a small amount, regulating number of store files
97 conf
.setInt("hbase.hregion.memstore.flush.size", 25000);
98 // so make sure we get a compaction when doing a load, but keep around
99 // some files in the store
100 conf
.setInt("hbase.hstore.compaction.min", 10);
101 conf
.setInt("hbase.hstore.compactionThreshold", 10);
102 // block writes if we get to 12 store files
103 conf
.setInt("hbase.hstore.blockingStoreFiles", 12);
104 conf
.setInt("hbase.regionserver.msginterval", 100);
105 conf
.setBoolean("hbase.master.enabletable.roundrobin", true);
106 // Avoid potentially aggressive splitting which would cause snapshot to fail
107 conf
.set(HConstants
.HBASE_REGION_SPLIT_POLICY_KEY
,
108 ConstantSizeRegionSplitPolicy
.class.getName());
109 // Execute cleaner frequently to induce failures
110 conf
.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL
);
111 conf
.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL
);
112 // Effectively disable TimeToLiveHFileCleaner. Don't want to fully disable it because that
113 // will even trigger races between creating the directory containing back references and
114 // the back reference itself.
115 conf
.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL
);
119 public void setup() throws Exception
{
120 fs
= UTIL
.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
121 rootDir
= UTIL
.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
123 admin
= UTIL
.getAdmin();
124 originalTableName
= TableName
.valueOf("test" + testName
.getMethodName());
125 cloneTableName
= TableName
.valueOf("test-clone-" + originalTableName
);
126 snapshotNameAsString
= "snapshot_" + originalTableName
;
127 snapshotName
= snapshotNameAsString
;
129 originalTable
= createTable(originalTableName
, TEST_FAM
);
130 loadData(originalTable
, TEST_FAM
);
131 countOriginalTable
= countRows(originalTable
);
132 System
.out
.println("Original table has: " + countOriginalTable
+ " rows");
136 public void tearDown() throws Exception
{
137 UTIL
.deleteTable(originalTableName
);
138 UTIL
.deleteTable(cloneTableName
);
139 SnapshotTestingUtils
.deleteAllSnapshots(UTIL
.getAdmin());
140 SnapshotTestingUtils
.deleteArchiveDirectory(UTIL
);
144 public static void cleanupTest() throws Exception
{
146 UTIL
.shutdownMiniCluster();
147 } catch (Exception e
) {
148 LOG
.warn("failure shutting down cluster", e
);
153 * Verify that adding data to the cloned table will not affect the original, and vice-versa when
154 * it is taken as an online snapshot.
157 public void testOnlineSnapshotAppendIndependent() throws Exception
{
158 createAndCloneSnapshot(true);
159 runTestSnapshotAppendIndependent();
163 * Verify that adding data to the cloned table will not affect the original, and vice-versa when
164 * it is taken as an offline snapshot.
167 public void testOfflineSnapshotAppendIndependent() throws Exception
{
168 createAndCloneSnapshot(false);
169 runTestSnapshotAppendIndependent();
173 * Verify that adding metadata to the cloned table will not affect the original, and vice-versa
174 * when it is taken as an online snapshot.
177 public void testOnlineSnapshotMetadataChangesIndependent() throws Exception
{
178 createAndCloneSnapshot(true);
179 runTestSnapshotMetadataChangesIndependent();
183 * Verify that adding netadata to the cloned table will not affect the original, and vice-versa
184 * when is taken as an online snapshot.
187 public void testOfflineSnapshotMetadataChangesIndependent() throws Exception
{
188 createAndCloneSnapshot(false);
189 runTestSnapshotMetadataChangesIndependent();
193 * Verify that region operations, in this case splitting a region, are independent between the
194 * cloned table and the original.
197 public void testOfflineSnapshotRegionOperationsIndependent() throws Exception
{
198 createAndCloneSnapshot(false);
199 runTestRegionOperationsIndependent();
203 * Verify that region operations, in this case splitting a region, are independent between the
204 * cloned table and the original.
207 public void testOnlineSnapshotRegionOperationsIndependent() throws Exception
{
208 createAndCloneSnapshot(true);
209 runTestRegionOperationsIndependent();
213 public void testOfflineSnapshotDeleteIndependent() throws Exception
{
214 createAndCloneSnapshot(false);
215 runTestSnapshotDeleteIndependent();
219 public void testOnlineSnapshotDeleteIndependent() throws Exception
{
220 createAndCloneSnapshot(true);
221 runTestSnapshotDeleteIndependent();
224 private static void waitOnSplit(Connection c
, final Table t
, int originalCount
) throws Exception
{
225 for (int i
= 0; i
< 200; i
++) {
226 Threads
.sleepWithoutInterrupt(500);
227 try (RegionLocator locator
= c
.getRegionLocator(t
.getName())) {
228 if (locator
.getAllRegionLocations().size() > originalCount
) {
233 throw new Exception("Split did not increase the number of regions");
237 * Takes the snapshot of originalTable and clones the snapshot to another tables.
238 * If {@code online} is false, the original table is disabled during taking snapshot, so also
240 * @param online - Whether the table is online or not during the snapshot
242 private void createAndCloneSnapshot(boolean online
) throws Exception
{
243 SnapshotTestingUtils
.createSnapshotAndValidate(admin
, originalTableName
, TEST_FAM_STR
,
244 snapshotNameAsString
, rootDir
, fs
, online
);
246 // If offline, enable the table disabled by snapshot testing util.
248 admin
.enableTable(originalTableName
);
249 UTIL
.waitTableAvailable(originalTableName
);
252 admin
.cloneSnapshot(snapshotName
, cloneTableName
);
253 UTIL
.waitUntilAllRegionsAssigned(cloneTableName
);
257 * Verify that adding data to original table or clone table doesn't affect other table.
259 private void runTestSnapshotAppendIndependent() throws Exception
{
260 try (Table clonedTable
= UTIL
.getConnection().getTable(cloneTableName
)) {
261 final int clonedTableRowCount
= countRows(clonedTable
);
264 "The line counts of original and cloned tables do not match after clone. ",
265 countOriginalTable
, clonedTableRowCount
);
267 // Attempt to add data to the test
268 Put p
= new Put(Bytes
.toBytes("new-row-" + EnvironmentEdgeManager
.currentTime()));
269 p
.addColumn(TEST_FAM
, Bytes
.toBytes("someQualifier"), Bytes
.toBytes("someString"));
270 originalTable
.put(p
);
272 // Verify that the new row is not in the restored table
273 Assert
.assertEquals("The row count of the original table was not modified by the put",
274 countOriginalTable
+ 1, countRows(originalTable
));
276 "The row count of the cloned table changed as a result of addition to the original",
277 clonedTableRowCount
, countRows(clonedTable
));
279 Put p2
= new Put(Bytes
.toBytes("new-row-" + EnvironmentEdgeManager
.currentTime()));
280 p2
.addColumn(TEST_FAM
, Bytes
.toBytes("someQualifier"), Bytes
.toBytes("someString"));
283 // Verify that the row is not added to the original table.
285 "The row count of the original table was modified by the put to the clone",
286 countOriginalTable
+ 1, countRows(originalTable
));
287 Assert
.assertEquals("The row count of the cloned table was not modified by the put",
288 clonedTableRowCount
+ 1, countRows(clonedTable
));
293 * Do a split, and verify that this only affects one table
295 private void runTestRegionOperationsIndependent() throws Exception
{
296 // Verify that region information is the same pre-split
297 UTIL
.getConnection().clearRegionLocationCache();
298 List
<RegionInfo
> originalTableHRegions
= admin
.getRegions(originalTableName
);
300 final int originalRegionCount
= originalTableHRegions
.size();
301 final int cloneTableRegionCount
= admin
.getRegions(cloneTableName
).size();
303 "The number of regions in the cloned table is different than in the original table.",
304 originalRegionCount
, cloneTableRegionCount
);
306 // Split a region on the parent table
307 admin
.splitRegionAsync(originalTableHRegions
.get(0).getRegionName()).get();
308 waitOnSplit(UTIL
.getConnection(), originalTable
, originalRegionCount
);
310 // Verify that the cloned table region is not split
311 final int cloneTableRegionCount2
= admin
.getRegions(cloneTableName
).size();
313 "The number of regions in the cloned table changed though none of its regions were split.",
314 cloneTableRegionCount
, cloneTableRegionCount2
);
318 * Add metadata, and verify that this only affects one table
320 private void runTestSnapshotMetadataChangesIndependent() throws Exception
{
321 // Add a new column family to the original table
322 byte[] TEST_FAM_2
= Bytes
.toBytes("fam2");
323 ColumnFamilyDescriptor familyDescriptor
= ColumnFamilyDescriptorBuilder
.of(TEST_FAM_2
);
325 admin
.disableTable(originalTableName
);
326 admin
.addColumnFamily(originalTableName
, familyDescriptor
);
328 // Verify that it is not in the snapshot
329 admin
.enableTable(originalTableName
);
330 UTIL
.waitTableAvailable(originalTableName
);
332 // get a description of the cloned table
333 // get a list of its families
334 // assert that the family is there
335 TableDescriptor originalTableDescriptor
= originalTable
.getDescriptor();
336 TableDescriptor clonedTableDescriptor
= admin
.getDescriptor(cloneTableName
);
338 Assert
.assertTrue("The original family was not found. There is something wrong. ",
339 originalTableDescriptor
.hasColumnFamily(TEST_FAM
));
340 Assert
.assertTrue("The original family was not found in the clone. There is something wrong. ",
341 clonedTableDescriptor
.hasColumnFamily(TEST_FAM
));
343 Assert
.assertTrue("The new family was not found. ",
344 originalTableDescriptor
.hasColumnFamily(TEST_FAM_2
));
345 Assert
.assertTrue("The new family was not found. ",
346 !clonedTableDescriptor
.hasColumnFamily(TEST_FAM_2
));
350 * Verify that deleting the snapshot does not affect either table.
352 private void runTestSnapshotDeleteIndependent() throws Exception
{
353 // Ensure the original table does not reference the HFiles anymore
354 admin
.majorCompact(originalTableName
);
356 // Deleting the snapshot used to break the cloned table by deleting in-use HFiles
357 admin
.deleteSnapshot(snapshotName
);
359 // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted
360 Pattern pattern
= Pattern
.compile(snapshotNameAsString
);
363 } while (!admin
.listSnapshots(pattern
).isEmpty());
365 try (Table original
= UTIL
.getConnection().getTable(originalTableName
)) {
366 try (Table clonedTable
= UTIL
.getConnection().getTable(cloneTableName
)) {
367 // Verify that all regions of both tables are readable
368 final int origTableRowCount
= countRows(original
);
369 final int clonedTableRowCount
= countRows(clonedTable
);
370 Assert
.assertEquals(origTableRowCount
, clonedTableRowCount
);
375 protected Table
createTable(final TableName table
, byte[] family
) throws Exception
{
376 Table t
= UTIL
.createTable(table
, family
);
377 // Wait for everything to be ready with the table
378 UTIL
.waitUntilAllRegionsAssigned(table
);
380 // At this point the table should be good to go.
384 public void loadData(final Table table
, byte[]... families
) throws Exception
{
385 UTIL
.loadTable(originalTable
, TEST_FAM
);
/**
 * Count the rows currently in {@code table}, restricted to the given families
 * (all families when none are given). Delegates to the testing utility.
 */
protected int countRows(final Table table, final byte[]... families) throws Exception {
  return UTIL.countRows(table, families);