HBASE-26700 The way we bypass broken track file is not enough in StoreFileListFile...
[hbase.git] / hbase-server / src / test / java / org / apache / hadoop / hbase / regionserver / TestCleanupCompactedFileAfterFailover.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
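
/**
 * Verifies that compacted store files which could not be archived before a region server dies
 * (here because an open user scanner still references them) are cleaned up once the region is
 * reopened on another region server after failover.
 */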
@Category({ LargeTests.class })
public class TestCleanupCompactedFileAfterFailover {

  private static final Logger LOG =
    LoggerFactory.getLogger(TestCleanupCompactedFileAfterFailover.class);

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCleanupCompactedFileAfterFailover.class);
  private static HBaseTestingUtil TEST_UTIL;
  private static Admin admin;
  private static Table table;

  private static TableName TABLE_NAME = TableName.valueOf("TestCleanupCompactedFileAfterFailover");
  private static byte[] ROW = Bytes.toBytes("row");
  private static byte[] FAMILY = Bytes.toBytes("cf");
  private static byte[] QUALIFIER = Bytes.toBytes("cq");
  private static byte[] VALUE = Bytes.toBytes("value");

  private static final int RS_NUMBER = 5;
  @BeforeClass
  public static void beforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    // Set the scanner lease to 20min so that the RegionServer cannot expire the open scanner
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1200000);
    TEST_UTIL.getConfiguration()
      .setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
    TEST_UTIL.getConfiguration().set("dfs.blocksize", "64000");
    TEST_UTIL.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
    TEST_UTIL.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
    TEST_UTIL.startMiniCluster(RS_NUMBER);
    admin = TEST_UTIL.getAdmin();
  }
  @AfterClass
  public static void afterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
  @Before
  public void before() throws Exception {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
    admin.createTable(builder.build());
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
  }
  @After
  public void after() throws Exception {
    admin.disableTable(TABLE_NAME);
    admin.deleteTable(TABLE_NAME);
  }
  @Test
  public void testCleanupAfterFailoverWithCompactOnce() throws Exception {
    testCleanupAfterFailover(1);
  }

  @Test
  public void testCleanupAfterFailoverWithCompactTwice() throws Exception {
    testCleanupAfterFailover(2);
  }

  @Test
  public void testCleanupAfterFailoverWithCompactThreeTimes() throws Exception {
    testCleanupAfterFailover(3);
  }
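
  /**
   * Runs the failover scenario: write and flush three store files, compact {@code compactNum}
   * times while an open scanner pins the compacted files, kill the hosting region server, then
   * verify the region's new host cleans up the leftover compacted files.
   */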
  private void testCleanupAfterFailover(int compactNum) throws Exception {
    HRegionServer rsServedTable = null;
    List<HRegion> regions = new ArrayList<>();
    for (JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster()
      .getLiveRegionServerThreads()) {
      HRegionServer rs = rsThread.getRegionServer();
      if (rs.getOnlineTables().contains(TABLE_NAME)) {
        regions.addAll(rs.getRegions(TABLE_NAME));
        rsServedTable = rs;
      }
    }
    assertNotNull(rsServedTable);
    assertEquals("Table should only have one region", 1, regions.size());
    HRegion region = regions.get(0);
    HStore store = region.getStore(FAMILY);
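
    // Create three store files so the subsequent major compaction has something to merge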
    writeDataAndFlush(3, region);
    assertEquals(3, store.getStorefilesCount());

    // Open a scanner and do not close it, so the store files stay referenced
    store.getScanner(new Scan(), null, 0);

    region.compact(true);
    assertEquals(1, store.getStorefilesCount());
    // The compacted files should not be archived, as the user scanner still references them
    assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    for (int i = 1; i < compactNum; i++) {
      // Compact again
      region.compact(true);
      assertEquals(1, store.getStorefilesCount());
      store.closeAndArchiveCompactedFiles();
      // The compacted file count stays at 3, as only the newly compacted store file was archived
      assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    }
    int walNum = rsServedTable.getWALs().size();
    // Roll WAL
    rsServedTable.getWalRoller().requestRollAll();
    // Flush again
    region.flush(true);
    // The WAL which contains the compaction event marker should be archived
    assertEquals("The old WAL should be archived", walNum, rsServedTable.getWALs().size());
    rsServedTable.kill();
    // Sleep to wait for failover
    Thread.sleep(3000);
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
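
    // Locate the region again, now reopened on a different region server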
    regions.clear();
    for (JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster()
      .getLiveRegionServerThreads()) {
      HRegionServer rs = rsThread.getRegionServer();
      if (rs != rsServedTable && rs.getOnlineTables().contains(TABLE_NAME)) {
        regions.addAll(rs.getRegions(TABLE_NAME));
      }
    }
    assertEquals("Table should only have one region", 1, regions.size());
    region = regions.get(0);
    store = region.getStore(FAMILY);
    // The compacted store files should have been cleaned up, leaving only 1 store file
    assertEquals(1, store.getStorefilesCount());
  }
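
  /**
   * Writes {@code fileNum} batches of 100 rows, flushing after each batch so the region ends up
   * with {@code fileNum} store files.
   */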
  private void writeDataAndFlush(int fileNum, HRegion region) throws Exception {
    for (int i = 0; i < fileNum; i++) {
      for (int j = 0; j < 100; j++) {
        table.put(new Put(concat(ROW, j)).addColumn(FAMILY, QUALIFIER, concat(VALUE, j)));
      }
      region.flush(true);
    }
  }
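
  // Appends "-<index>" to the given byte array, e.g. concat(ROW, 1) yields "row-1"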
  private byte[] concat(byte[] base, int index) {
    return Bytes.toBytes(Bytes.toString(base) + "-" + index);
  }
}