/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;
import static org.junit.Assert.assertTrue;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.tool.TestBulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
 * Test cycle: take a full backup of t1, insert and bulk load data, take
 * incremental backups of t1, then restore the latest increment and verify
 * that the restored row counts include the bulk-loaded rows.
 */
@Category(LargeTests.class)
public class TestIncrementalBackupWithBulkLoad extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackupWithBulkLoad.class);

  private static final Logger LOG =
    LoggerFactory.getLogger(TestIncrementalBackupWithBulkLoad.class);
  // implement all test cases in one test since incremental backup/restore has dependencies
  @Test
  public void TestIncBackupDeleteTable() throws Exception {
    String testName = "TestIncBackupDeleteTable";

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);

    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));
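
    // The incremental backups taken below are only meaningful relative to this
    // full backup image; it is the baseline for all subsequent increments.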
    // #2 - insert some data to table table1
    Table t1 = conn.getTable(table1);
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      Put p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }

    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
    t1.close();
    LOG.debug("bulk loading into " + testName);
    int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
      qualName, false, null,
      new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
        new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") }, },
      true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);
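
    // "actual" is the number of rows written by this bulk load; the row-count
    // assertions below add it on top of the rows inserted with Put.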
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - bulk load again
    LOG.debug("bulk loading into " + testName);
    int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
      qualName, false, null,
      new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
        new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, },
      true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2);
    // #5 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple1 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple1));
    // Delete all data in table1
    TEST_UTIL.deleteTableData(table1);

    // #5.1 - check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    // TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1,
      false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true));
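
    // The restored table should hold the rows present at full-backup time plus
    // everything the incremental backups captured: the puts and both bulk loads.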
    Table hTable = conn.getTable(table1);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);

    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    backupIdFull = client.backupTables(request);
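
    // After a fresh full backup, the backup system table should no longer track
    // any pending bulk-loaded files for these tables, so readBulkloadRows is
    // expected to come back empty.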
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>,
        List<byte[]>> pair = table.readBulkloadRows(tables);
      assertTrue("map still has " + pair.getSecond().size() + " entries",
        pair.getSecond().isEmpty());
    }
    assertTrue(checkSucceeded(backupIdFull));

    hTable.close();
    admin.close();
    conn.close();
  }
}