/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.FailedArchiveException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
/**
 * Tests that archiving compacted files behaves correctly when encountering exceptions.
 */
@Category(MediumTests.class)
public class TestCompactionArchiveIOException {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCompactionArchiveIOException.class);
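
  // ERROR_FILE mimics a store file name; initHRegion() below stubs its rename to throw,
  // simulating an IOException while archiving a compacted file.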
  private static final String ERROR_FILE = "fffffffffffffffffdeadbeef";

  public HBaseTestingUtil testUtil;

  private Path testDir;

  @Rule
  public TestName name = new TestName();
  @Before
  public void setup() throws Exception {
    testUtil = new HBaseTestingUtil();
    testUtil.startMiniDFSCluster(1);
    testDir = testUtil.getDataTestDirOnTestFS();
    CommonFSUtils.setRootDir(testUtil.getConfiguration(), testDir);
  }
  @After
  public void tearDown() throws Exception {
    testUtil.cleanupTestDir();
    testUtil.shutdownMiniDFSCluster();
  }
  @Test
  public void testRemoveCompactedFilesWithException() throws Exception {
    byte[] fam = Bytes.toBytes("f");
    byte[] col = Bytes.toBytes("c");
    byte[] val = Bytes.toBytes("val");

    TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
    HRegion region = initHRegion(htd, info);
    RegionServerServices rss = mock(RegionServerServices.class);
    List<HRegion> regions = new ArrayList<>();
    regions.add(region);
    Mockito.doReturn(regions).when(rss).getRegions();
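    // The discharger discovers regions via RegionServerServices, so the mock above hands
    // it our single test region.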
    // Create the cleaner object
    final CompactedHFilesDischarger cleaner =
      new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
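    // The trailing 'false' asks the discharger not to use its internal executor, so the
    // cleaner.chore() call below should discharge files synchronously on this thread.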
    // Add some data to the region and do some flushes
    int batchSize = 10;
    int fileCount = 10;
    for (int f = 0; f < fileCount; f++) {
      int start = f * batchSize;
      for (int i = start; i < start + batchSize; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(fam, col, val);
        region.put(p);
      }
      // flush them
      region.flush(true);
    }
    HStore store = region.getStore(fam);
    assertEquals(fileCount, store.getStorefilesCount());

    Collection<HStoreFile> storefiles = store.getStorefiles();
    // None of the files should be in compacted state.
    for (HStoreFile file : storefiles) {
      assertFalse(file.isCompactedAway());
    }

    StoreFileManager fileManager = store.getStoreEngine().getStoreFileManager();
    Collection<HStoreFile> initialCompactedFiles = fileManager.getCompactedfiles();
    assertTrue(initialCompactedFiles == null || initialCompactedFiles.isEmpty());
    // Do compaction
    region.compact(true);

    // all prior store files should now be compacted
    Collection<HStoreFile> compactedFilesPreClean = fileManager.getCompactedfiles();
    assertNotNull(compactedFilesPreClean);
    assertTrue(compactedFilesPreClean.size() > 0);
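    // Compacted-away store files are not deleted inline; they stay in the store file
    // manager's compactedfiles list until the discharger chore archives them.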
    // add the dummy file to the store directory
    HRegionFileSystem regionFS = region.getRegionFileSystem();
    Path errFile = regionFS.getStoreFilePath(Bytes.toString(fam), ERROR_FILE);
    FSDataOutputStream out = regionFS.getFileSystem().create(errFile);
    out.writeInt(1);
    out.close();

    HStoreFile errStoreFile = new MockHStoreFile(testUtil, errFile, 1, 0, false, 1);
    fileManager.addCompactionResults(ImmutableList.of(errStoreFile), ImmutableList.of());
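    // The dummy file is registered as compacted away with no replacement files, so the
    // discharger will attempt (and fail) to archive it on its next run.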
    // cleanup compacted files
    cleaner.chore();
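    // The chore archives every compacted file except ERROR_FILE, whose rename is stubbed
    // to throw; the successfully archived files must still be cleared from the list.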
    // make sure the compacted files are cleared
    Collection<HStoreFile> compactedFilesPostClean = fileManager.getCompactedfiles();
    assertEquals(1, compactedFilesPostClean.size());
    for (HStoreFile origFile : compactedFilesPreClean) {
      assertFalse(compactedFilesPostClean.contains(origFile));
    }
    // close the region
    try {
      region.close();
    } catch (FailedArchiveException e) {
      // expected due to errorfile
      assertEquals(1, e.getFailedFiles().size());
      assertEquals(ERROR_FILE, e.getFailedFiles().iterator().next().getName());
    }
  }
  private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOException {
    Configuration conf = testUtil.getConfiguration();
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    Path tableDir = CommonFSUtils.getTableDir(testDir, htd.getTableName());
    Path regionDir = new Path(tableDir, info.getEncodedName());
    Path storeDir = new Path(regionDir, htd.getColumnFamilies()[0].getNameAsString());
    FileSystem errFS = spy(testUtil.getTestFileSystem());
    // Prior to HBASE-16964, when an exception is thrown archiving any compacted file,
    // none of the other files are cleared from the compactedfiles list.
    // Simulate this condition with a dummy file
    doThrow(new IOException("Error for test")).when(errFS)
      .rename(eq(new Path(storeDir, ERROR_FILE)), any());
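    // Archiving moves store files into the archive directory via FileSystem.rename, so
    // failing the rename of ERROR_FILE is enough to make its archival throw.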
    HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
    final Configuration walConf = new Configuration(conf);
    CommonFSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName());
    HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);

    region.initialize();

    return region;
  }
}