/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
/**
 * Tests that a failed backup delete operation can be repaired. A master coprocessor injects
 * failures into the snapshot delete path, and the test verifies that the backup system table
 * keeps the pending delete record until the "repair" command completes the operation.
 */
@Category(LargeTests.class)
public class TestBackupDeleteWithFailures extends TestBackupBase {
  private static final Log LOG = LogFactory.getLog(TestBackupDeleteWithFailures.class);
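  /** Failure points that {@link MasterSnapshotObserver} can inject into the snapshot lifecycle. */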
  public static enum Failure {
    NO_FAILURES,
    PRE_SNAPSHOT_FAILURE,
    PRE_DELETE_SNAPSHOT_FAILURE,
    POST_DELETE_SNAPSHOT_FAILURE
  }
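  /**
   * Master observer that throws an IOException at the configured failure points, simulating
   * snapshot and snapshot-delete failures on the master.
   */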
  public static class MasterSnapshotObserver implements MasterObserver {

    List<Failure> failures = new ArrayList<Failure>();

    public void setFailures(Failure... f) {
      // Replace any previously configured failure points
      failures.clear();
      for (int i = 0; i < f.length; i++) {
        failures.add(f[i]);
      }
    }
    @Override
    public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
        final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor)
        throws IOException {
      if (failures.contains(Failure.PRE_SNAPSHOT_FAILURE)) {
        throw new IOException("preSnapshot");
      }
    }
    @Override
    public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
        SnapshotDescription snapshot) throws IOException {
      if (failures.contains(Failure.PRE_DELETE_SNAPSHOT_FAILURE)) {
        throw new IOException("preDeleteSnapshot");
      }
    }
    @Override
    public void postDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
        SnapshotDescription snapshot) throws IOException {
      if (failures.contains(Failure.POST_DELETE_SNAPSHOT_FAILURE)) {
        throw new IOException("postDeleteSnapshot");
      }
    }
  }
  /**
   * Registers the snapshot observer as a master coprocessor and limits client retries to one.
   * @throws java.lang.Exception if the base setup fails
   */
  @Override
  @Before
  public void setUp() throws Exception {
    conf1.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        MasterSnapshotObserver.class.getName());
    conf1.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    super.setUp();
  }
  private MasterSnapshotObserver getMasterSnapshotObserver() {
    return (MasterSnapshotObserver) TEST_UTIL.getHBaseCluster().getMaster()
        .getMasterCoprocessorHost().findCoprocessor(MasterSnapshotObserver.class.getName());
  }
  @Test
  public void testBackupDeleteWithFailures() throws Exception {
    testBackupDeleteWithFailuresAfter(1, Failure.PRE_DELETE_SNAPSHOT_FAILURE);
    testBackupDeleteWithFailuresAfter(0, Failure.POST_DELETE_SNAPSHOT_FAILURE);
    testBackupDeleteWithFailuresAfter(1, Failure.PRE_SNAPSHOT_FAILURE);
  }
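  /**
   * Takes a full backup of table1, injects the given failures, attempts to delete the backup,
   * and verifies the backup history and the pending-delete record before and after running the
   * "repair" command. {@code expected} is the number of backups that should remain in the
   * history after the failed delete.
   */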
  private void testBackupDeleteWithFailuresAfter(int expected, Failure... failures)
      throws Exception {
    LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
    List<TableName> tableList = Lists.newArrayList(table1);
    String backupId = fullTableBackup(tableList);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");

    String[] backupIds = new String[] { backupId };
    BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection());
    BackupInfo info = table.readBackupInfo(backupId);
    Path path = new Path(info.getBackupRootDir(), backupId);
    FileSystem fs = FileSystem.get(path.toUri(), conf1);
    assertTrue(fs.exists(path));
    Connection conn = TEST_UTIL.getConnection();
    Admin admin = conn.getAdmin();
    MasterSnapshotObserver observer = getMasterSnapshotObserver();

    observer.setFailures(failures);
    try {
      getBackupAdmin().deleteBackups(backupIds);
    } catch (IOException e) {
      // An exception here is only acceptable when the delete was expected to fail (expected == 1)
      if (expected != 1) {
        assertTrue(false);
      }
    }
    // Verify that history length == expected after delete failure
    assertTrue(table.getBackupHistory().size() == expected);

    String[] ids = table.getListOfBackupIdsFromDeleteOperation();

    // Verify that we still have delete record in backup system table
    if (expected == 1) {
      assertTrue(ids.length == 1);
      assertTrue(ids[0].equals(backupId));
    } else {
      assertNull(ids);
    }
    // Now run repair command to repair "failed" delete operation
    String[] args = new String[] { "repair" };

    observer.setFailures(Failure.NO_FAILURES);

    // Run repair
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);

    // Verify that history length == 0
    assertTrue(table.getBackupHistory().size() == 0);
    ids = table.getListOfBackupIdsFromDeleteOperation();

    // Verify that we do not have delete record in backup system table