/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;

@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {
  private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);

  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<Object[]>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }
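
  // NOTE: the Boolean parameter only serves to trigger the Parameterized
  // runner; the interesting side effect is data() setting provider to
  // "multiwal", which (assuming TestBackupBase wires it into the WAL
  // configuration of the mini cluster) runs the whole test against the
  // multi-WAL provider.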

  public TestIncrementalBackup(Boolean b) {
  }

  // implement all test cases in 1 test since incremental backup/restore has dependencies
  @Test
  public void TestIncBackupRestore() throws Exception {
    // ADD_ROWS: rows appended to table1/f1 after the full backup. The value
    // here is an assumption; the original was not preserved in this copy.
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
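    // At this point table1 carries the extra family f3, so the full backup
    // taken below captures f3 data as part of the baseline image.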

    Connection conn = ConnectionFactory.createConnection(conf1);

    // NB_ROWS_FAM3: rows written into the new family f3. The value here is
    // an assumption; the original was not preserved in this copy.
    int NB_ROWS_FAM3 = 6;
    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();

    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));
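    // backupIdFull identifies the baseline image; it is restored later in
    // step #4. checkSucceeded presumably consults the backup system table
    // for the session state (helper from TestBackupBase).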

    // #2 - insert some data to table
    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);

    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);

    HTable t2 = (HTable) conn.getTable(table2);
    Put p2;
    for (int i = 0; i < 5; i++) {
      p2 = new Put(Bytes.toBytes("row-t2" + i));
      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t2.put(p2);
    }

    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
    t2.close();
    LOG.debug("written " + 5 + " rows to " + table2);
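    // Everything written after the full backup (ADD_ROWS rows into f1 of
    // table1, 5 rows into table2) exists only in the WALs, which is exactly
    // what the incremental backup below has to pick up.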

    // split table1
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    List<HRegion> regions = cluster.getRegions(table1);

    byte[] name = regions.get(0).getRegionInfo().getRegionName();
    long startSplitTime = EnvironmentEdgeManager.currentTime();
    try {
      admin.splitRegion(name);
    } catch (IOException e) {
      // Although the split failed, this may not affect the following check.
      // In the old split without AM2, if the region's best split key was not
      // found, no exception was thrown; with the current API, one is.
      LOG.debug("region is not splittable, because " + e);
    }

    while (!admin.isTableAvailable(table1)) {
      Thread.sleep(100);
    }

    long endSplitTime = EnvironmentEdgeManager.currentTime();
    // split finished
    LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
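    // The split is deliberate noise: incremental backup is WAL-based, and
    // splitting a region mid-stream checks that changing region boundaries
    // does not corrupt the incremental image.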

    // #3 - incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
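    // backupIdIncMultiple now holds the first incremental image on top of
    // backupIdFull.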

    // add column family f2 to table1
    final byte[] fam2Name = Bytes.toBytes("f2");
    table1Desc.addFamily(new HColumnDescriptor(fam2Name));
    // drop column family f3
    table1Desc.removeFamily(fam3Name);
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);

    int NB_ROWS_FAM2 = 7;
    HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
    t3.close();
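    // The NB_ROWS_FAM2 rows exist only in WALs written after the first
    // incremental backup, and f3 is gone from the schema, so the next
    // incremental image has to reflect both changes.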

    // #3.1 - second incremental backup for multiple tables, after the schema change
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));
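    // backupIdIncMultiple2 is the image restored in step #6 below.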

    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

    LOG.debug("Restoring full " + backupIdFull);
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
      tablesRestoreFull, tablesMapFull, false));
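    // Restore the baseline image only, into the *_restore table names. The
    // two boolean arguments are taken to be the pre-restore check flag and
    // the overwrite flag (both false here), per BackupUtils.createRestoreRequest.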

    // #5.1 - check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    hAdmin.close();

    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
    hTable.close();

    hTable = (HTable) conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
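    // The restored tables must match the baseline image exactly: ADD_ROWS
    // and the 5 table2 rows were written after the full backup, so they
    // must be absent here.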

    // #6 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
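    // Restoring the second incremental image should replay the whole chain
    // (full backup plus both incrementals), so the restored table1 is
    // expected to show the latest schema: f2 present with NB_ROWS_FAM2 rows,
    // f3 dropped, and all f1 rows including ADD_ROWS.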

    hTable = (HTable) conn.getTable(table1_restore);
    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS);
    LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
    Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2);
    hTable.close();

    hTable = (HTable) conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5);
    hTable.close();

    admin.close();
    conn.close();
  }
}