// HBASE-26567 Remove IndexType from ChunkCreator (#3947)
// hbase.git: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupCompactedFileOnRegionClose.java
// blob f43b0f579dd9fedeb22164dcd8efd5d1255c8149
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.util.Collection;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
48 @Category({MediumTests.class})
49 public class TestCleanupCompactedFileOnRegionClose {
51 @ClassRule
52 public static final HBaseClassTestRule CLASS_RULE =
53 HBaseClassTestRule.forClass(TestCleanupCompactedFileOnRegionClose.class);
55 private static HBaseTestingUtil util;
57 @BeforeClass
58 public static void beforeClass() throws Exception {
59 util = new HBaseTestingUtil();
60 util.getConfiguration().setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY,100);
61 util.getConfiguration().set("dfs.blocksize", "64000");
62 util.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
63 util.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY,"0");
64 util.startMiniCluster(2);
67 @AfterClass
68 public static void afterclass() throws Exception {
69 util.shutdownMiniCluster();
72 @Test
73 public void testCleanupOnClose() throws Exception {
74 TableName tableName = TableName.valueOf("testCleanupOnClose");
75 String familyName = "f";
76 byte[] familyNameBytes = Bytes.toBytes(familyName);
77 util.createTable(tableName, familyName);
79 Admin hBaseAdmin = util.getAdmin();
80 Table table = util.getConnection().getTable(tableName);
82 HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
83 Region region = rs.getRegions(tableName).get(0);
85 int refSFCount = 4;
86 for (int i = 0; i < refSFCount; i++) {
87 for (int j = 0; j < refSFCount; j++) {
88 Put put = new Put(Bytes.toBytes(j));
89 put.addColumn(familyNameBytes, Bytes.toBytes(i), Bytes.toBytes(j));
90 table.put(put);
92 util.flush(tableName);
94 assertEquals(refSFCount, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
96 //add a delete, to test wether we end up with an inconsistency post region close
97 Delete delete = new Delete(Bytes.toBytes(refSFCount-1));
98 table.delete(delete);
99 util.flush(tableName);
100 assertFalse(table.exists(new Get(Bytes.toBytes(refSFCount-1))));
102 //Create a scanner and keep it open to add references to StoreFileReaders
103 Scan scan = new Scan();
104 scan.withStopRow(Bytes.toBytes(refSFCount-2));
105 scan.setCaching(1);
106 ResultScanner scanner = table.getScanner(scan);
107 Result res = scanner.next();
108 assertNotNull(res);
109 assertEquals(refSFCount, res.getFamilyMap(familyNameBytes).size());
112 //Verify the references
113 int count = 0;
114 for (HStoreFile sf : (Collection<HStoreFile>)region.getStore(familyNameBytes).getStorefiles()) {
115 synchronized (sf) {
116 if (count < refSFCount) {
117 assertTrue(sf.isReferencedInReads());
118 } else {
119 assertFalse(sf.isReferencedInReads());
122 count++;
125 //Major compact to produce compacted storefiles that need to be cleaned up
126 util.compact(tableName, true);
127 assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
128 assertEquals(refSFCount+1,
129 ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
130 .getCompactedfiles().size());
132 //close then open the region to determine wether compacted storefiles get cleaned up on close
133 hBaseAdmin.unassign(region.getRegionInfo().getRegionName(), false);
134 hBaseAdmin.assign(region.getRegionInfo().getRegionName());
135 util.waitUntilNoRegionsInTransition(10000);
138 assertFalse("Deleted row should not exist",
139 table.exists(new Get(Bytes.toBytes(refSFCount-1))));
141 rs = util.getRSForFirstRegionInTable(tableName);
142 region = rs.getRegions(tableName).get(0);
143 assertEquals(1, region.getStoreFileList(new byte[][]{familyNameBytes}).size());
144 assertEquals(0,
145 ((HStore)region.getStore(familyNameBytes)).getStoreEngine().getStoreFileManager()
146 .getCompactedfiles().size());