/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient;
import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.mapreduce.HadoopSecurityEnabledUserProviderForTesting;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.AfterClass;
import org.junit.Before;
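
/*
 * Usage sketch (illustrative only, not a test): a subclass typically drives a backup with the
 * helpers defined below, e.g.
 *
 *   List<TableName> tables = toList(table1.getNameAsString(), table2.getNameAsString());
 *   String backupId = fullTableBackup(tables);
 *   assertTrue(checkSucceeded(backupId));
 */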
/**
 * This class is only a base for other integration-level backup tests. Do not add tests here.
 * Tests that don't require bringing machines up/down should go into TestBackupSmallTests. All
 * other tests should have their own classes and extend this one.
 */
public class TestBackupBase {
  private static final Log LOG = LogFactory.getLog(TestBackupBase.class);

  protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  protected static HBaseTestingUtility TEST_UTIL2;
  protected static Configuration conf1 = TEST_UTIL.getConfiguration();
  protected static Configuration conf2;

  protected static TableName table1 = TableName.valueOf("table1");
  protected static HTableDescriptor table1Desc;
  protected static TableName table2 = TableName.valueOf("table2");
  protected static TableName table3 = TableName.valueOf("table3");
  protected static TableName table4 = TableName.valueOf("table4");

  protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
  protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
  protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
  protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");

  protected static final int NB_ROWS_IN_BATCH = 99;
  protected static final byte[] qualName = Bytes.toBytes("q1");
  protected static final byte[] famName = Bytes.toBytes("f");

  protected static String BACKUP_ROOT_DIR = "/backupUT";
  protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
  protected static String provider = "defaultProvider";
  protected static boolean secure = false;

  protected static boolean autoRestoreOnFailure = true;
  protected static boolean setupIsDone = false;
  protected static boolean useSecondCluster = false;
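
  // The two inner classes below mirror the production incremental/full backup clients but
  // call failStageIf(...) at fixed checkpoints, so failure-injection tests can abort a
  // backup at a chosen Stage and verify error handling and cleanup.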
  static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {

    public IncrementalTableBackupClientForTest() {
    }

    public IncrementalTableBackupClientForTest(Connection conn,
        String backupId, BackupRequest request) throws IOException {
      super(conn, backupId, request);
    }

    @Override
    public void execute() throws IOException {
      // case INCREMENTAL_COPY:
      try {
        // case PREPARE_INCREMENTAL:
        failStageIf(Stage.stage_0);
        beginBackup(backupManager, backupInfo);

        failStageIf(Stage.stage_1);
        backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
        LOG.debug("For incremental backup, current table set is "
            + backupManager.getIncrementalBackupTableSet());
        newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
        // copy out the table and region info files for each table
        BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
        // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
        convertWALsToHFiles(backupInfo);
        incrementalCopyHFiles(backupInfo);
        failStageIf(Stage.stage_2);
        // Save list of WAL files copied
        backupManager.recordWALFiles(backupInfo.getIncrBackupFileList());

        // case INCR_BACKUP_COMPLETE:
        // set overall backup status: complete. Here we make sure to complete the backup.
        // After this checkpoint, even if entering cancel process, will let the backup finish.
        // Set the previousTimestampMap which is before this current log roll to the manifest.
        HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
            backupManager.readLogTimestampMap();
        backupInfo.setIncrTimestampMap(previousTimestampMap);

        // The table list in backupInfo is good for both full backup and incremental backup.
        // For incremental backup, it contains the incremental backup table set.
        backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
        failStageIf(Stage.stage_3);

        HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
            backupManager.readLogTimestampMap();

        Long newStartCode =
            BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
        backupManager.writeBackupStartCode(newStartCode);

        handleBulkLoad(backupInfo.getTableNames());
        failStageIf(Stage.stage_4);

        // backup complete
        completeBackup(conn, backupInfo, backupManager, BackupType.INCREMENTAL, conf);
      } catch (Exception e) {
        failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
            BackupType.INCREMENTAL, conf);
        throw new IOException(e);
      }
    }
  }
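
  // A failure-injection test typically registers the test client above through configuration
  // before requesting a backup. A sketch, assuming a BACKUP_CLIENT_IMPL_CLASS constant on
  // TableBackupClient (check the constant name in this code base):
  //
  //   conf1.set(TableBackupClient.BACKUP_CLIENT_IMPL_CLASS,
  //       IncrementalTableBackupClientForTest.class.getName());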
  static class FullTableBackupClientForTest extends FullTableBackupClient {

    public FullTableBackupClientForTest() {
    }

    public FullTableBackupClientForTest(Connection conn, String backupId, BackupRequest request)
        throws IOException {
      super(conn, backupId, request);
    }

    @Override
    public void execute() throws IOException {
      // Get the stage ID to fail on
      try (Admin admin = conn.getAdmin()) {
        // Begin BACKUP
        beginBackup(backupManager, backupInfo);
        failStageIf(Stage.stage_0);
        String savedStartCode = null;
        boolean firstBackup = false;
        // do snapshot for full table backup
        savedStartCode = backupManager.readBackupStartCode();
        firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
        if (firstBackup) {
          // This is our first backup. Let's put some marker to system table so that we can hold
          // the logs while we do the backup.
          backupManager.writeBackupStartCode(0L);
        }
        failStageIf(Stage.stage_1);
        // We roll log here before we do the snapshot. It is possible there is duplicate data
        // in the log that is already in the snapshot. But if we do it after the snapshot, we
        // could have data loss.
        // A better approach is to do the roll log on each RS in the same global procedure as
        // the snapshot.
        LOG.info("Execute roll log procedure for full backup ...");

        Map<String, String> props = new HashMap<String, String>();
        props.put("backupRoot", backupInfo.getBackupRootDir());
        admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
            LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
        failStageIf(Stage.stage_2);
        newTimestamps = backupManager.readRegionServerLastLogRollResult();
        if (firstBackup) {
          // Updates registered log files
          // We record ALL old WAL files as registered, because
          // this is a first full backup in the system and these
          // files are not needed for next incremental backup
          List<String> logFiles = BackupUtils.getWALFilesOlderThan(conf, newTimestamps);
          backupManager.recordWALFiles(logFiles);
        }

        // SNAPSHOT_TABLES:
        backupInfo.setPhase(BackupPhase.SNAPSHOT);
        for (TableName tableName : tableList) {
          String snapshotName =
              "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
                  + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();

          snapshotTable(admin, tableName, snapshotName);
          backupInfo.setSnapshotName(tableName, snapshotName);
        }
        failStageIf(Stage.stage_3);

        // SNAPSHOT_COPY: do snapshot copy
        LOG.debug("snapshot copy for " + backupId);
        snapshotCopy(backupInfo);
        // Updates incremental backup table set
        backupManager.addIncrementalBackupTableSet(backupInfo.getTables());

        // BACKUP_COMPLETE:
        // set overall backup status: complete. Here we make sure to complete the backup.
        // After this checkpoint, even if entering cancel process, will let the backup finish.
        backupInfo.setState(BackupState.COMPLETE);
        // The table list in backupInfo is good for both full backup and incremental backup.
        // For incremental backup, it contains the incremental backup table set.
        backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);

        HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
            backupManager.readLogTimestampMap();

        Long newStartCode =
            BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
        backupManager.writeBackupStartCode(newStartCode);
        failStageIf(Stage.stage_4);

        // backup complete
        completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
      } catch (Exception e) {
        if (autoRestoreOnFailure) {
          failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
              BackupType.FULL, conf);
        }
        throw new IOException(e);
      }
    }
  }
  /**
   * Sets up the mini cluster(s) once for all tests in the class.
   * @throws java.lang.Exception
   */
  @Before
  public void setUp() throws Exception {
    if (setupIsDone) {
      return;
    }
    if (secure) {
      // set the always on security provider
      UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
          HadoopSecurityEnabledUserProviderForTesting.class);
      // setup configuration
      SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
    }
    String coproc = conf1.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
    conf1.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        (coproc == null ? "" : coproc + ",") + BackupObserver.class.getName());
    conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    BackupManager.decorateMasterConfiguration(conf1);
    BackupManager.decorateRegionServerConfiguration(conf1);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // Set MultiWAL (with 2 default WAL files per RS)
    conf1.set(WALFactory.WAL_PROVIDER, provider);
    TEST_UTIL.startMiniCluster();

    if (useSecondCluster) {
      conf2 = HBaseConfiguration.create(conf1);
      conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
      TEST_UTIL2 = new HBaseTestingUtility(conf2);
      TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
      TEST_UTIL2.startMiniCluster();
    }
    conf1 = TEST_UTIL.getConfiguration();

    TEST_UTIL.startMiniMapReduceCluster();
    BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
    LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
    if (useSecondCluster) {
      BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
      LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
    }
    createTables();
    populateFromMasterConfig(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(), conf1);
    setupIsDone = true;
  }
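
  // Mirror the running master's effective configuration into the given conf so test code
  // sees exactly the settings the cluster is using.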
  private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
    Iterator<Entry<String, String>> it = masterConf.iterator();
    while (it.hasNext()) {
      Entry<String, String> e = it.next();
      conf.set(e.getKey(), e.getValue());
    }
  }
  /**
   * @throws java.lang.Exception
   */
  @AfterClass
  public static void tearDown() throws Exception {
    try {
      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
    } catch (Exception e) {
      // ignore: there may be no snapshots to delete
    }
    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
    if (useSecondCluster) {
      TEST_UTIL2.shutdownMiniCluster();
    }
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.shutdownMiniMapReduceCluster();
  }
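
  /**
   * Writes {@code numRows} rows with keys of the form "row-&lt;table&gt;-&lt;id&gt;-&lt;i&gt;"
   * into {@code table} and returns the open HTable; the caller is responsible for closing it.
   */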
  HTable insertIntoTable(Connection conn, TableName table, byte[] family, int id, int numRows)
      throws IOException {
    HTable t = (HTable) conn.getTable(table);
    Put p1;
    for (int i = 0; i < numRows; i++) {
      p1 = new Put(Bytes.toBytes("row-" + table + "-" + id + "-" + i));
      p1.addColumn(family, qualName, Bytes.toBytes("val" + i));
      t.put(p1);
    }
    return t;
  }
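
  /** Builds a BackupRequest for the given backup type, table list and target root directory. */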
  protected BackupRequest createBackupRequest(BackupType type,
      List<TableName> tables, String path) {
    BackupRequest.Builder builder = new BackupRequest.Builder();
    BackupRequest request = builder.withBackupType(type)
        .withTableList(tables)
        .withTargetRootDir(path).build();
    return request;
  }
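
  /**
   * Requests a backup of the given type for the given tables and returns its backup id.
   * The connection and the backup admin are closed before returning.
   */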
  protected String backupTables(BackupType type, List<TableName> tables, String path)
      throws IOException {
    Connection conn = null;
    BackupAdmin badmin = null;
    String backupId;
    try {
      conn = ConnectionFactory.createConnection(conf1);
      badmin = new BackupAdminImpl(conn);
      BackupRequest request = createBackupRequest(type, tables, path);
      backupId = badmin.backupTables(request);
    } finally {
      if (badmin != null) {
        badmin.close();
      }
      if (conn != null) {
        conn.close();
      }
    }
    return backupId;
  }
  protected String fullTableBackup(List<TableName> tables) throws IOException {
    return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR);
  }

  protected String incrementalTableBackup(List<TableName> tables) throws IOException {
    return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
  }
  protected static void loadTable(Table table) throws Exception {
    Put p; // 100 + 1 row to t1_syncup
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p = new Put(Bytes.toBytes("row" + i));
      p.setDurability(Durability.SKIP_WAL);
      p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      table.put(p);
    }
  }
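
  /**
   * Creates namespaces ns1..ns4 and four test tables: table1 and table2 are pre-loaded with
   * NB_ROWS_IN_BATCH rows each, table3 and table4 are left empty.
   */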
  protected static void createTables() throws Exception {
    long tid = System.currentTimeMillis();
    table1 = TableName.valueOf("ns1:test-" + tid);
    HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();

    // Create namespaces
    NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build();
    NamespaceDescriptor desc2 = NamespaceDescriptor.create("ns2").build();
    NamespaceDescriptor desc3 = NamespaceDescriptor.create("ns3").build();
    NamespaceDescriptor desc4 = NamespaceDescriptor.create("ns4").build();

    ha.createNamespace(desc1);
    ha.createNamespace(desc2);
    ha.createNamespace(desc3);
    ha.createNamespace(desc4);

    HTableDescriptor desc = new HTableDescriptor(table1);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    desc.addFamily(fam);
    ha.createTable(desc);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Table table = conn.getTable(table1);
    loadTable(table);
    table.close();
    table2 = TableName.valueOf("ns2:test-" + tid + 1);
    desc = new HTableDescriptor(table2);
    desc.addFamily(fam);
    ha.createTable(desc);
    table = conn.getTable(table2);
    loadTable(table);
    table.close();
    table3 = TableName.valueOf("ns3:test-" + tid + 2);
    table = TEST_UTIL.createTable(table3, famName);
    table.close();
    table4 = TableName.valueOf("ns4:test-" + tid + 3);
    table = TEST_UTIL.createTable(table4, famName);
    table.close();
    ha.close();
    conn.close();
  }
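
  /** Returns true if the backup with the given id exists and reached state COMPLETE. */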
  protected boolean checkSucceeded(String backupId) throws IOException {
    BackupInfo status = getBackupInfo(backupId);
    if (status == null) {
      return false;
    }
    return status.getState() == BackupState.COMPLETE;
  }

  protected boolean checkFailed(String backupId) throws IOException {
    BackupInfo status = getBackupInfo(backupId);
    if (status == null) {
      return false;
    }
    return status.getState() == BackupState.FAILED;
  }
  private BackupInfo getBackupInfo(String backupId) throws IOException {
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
      BackupInfo status = table.readBackupInfo(backupId);
      return status;
    }
  }
  protected BackupAdmin getBackupAdmin() throws IOException {
    return new BackupAdminImpl(TEST_UTIL.getConnection());
  }
  /**
   * Helper method: converts table name strings into a list of TableName.
   */
  protected List<TableName> toList(String... args) {
    List<TableName> ret = new ArrayList<>();
    for (int i = 0; i < args.length; i++) {
      ret.add(TableName.valueOf(args[i]));
    }
    return ret;
  }
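
  /** Recursively logs, at DEBUG level, every file path under BACKUP_ROOT_DIR. */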
  protected void dumpBackupDir() throws IOException {
    FileSystem fs = FileSystem.get(conf1);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
    while (it.hasNext()) {
      LOG.debug(it.next().getPath());
    }
  }
}