/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
51 @Category({ LargeTests
.class, RegionServerTests
.class })
52 public class TestNotCleanupCompactedFileWhenRegionWarmup
{
53 private static final Logger LOG
=
54 LoggerFactory
.getLogger(TestNotCleanupCompactedFileWhenRegionWarmup
.class);
57 public static final HBaseClassTestRule CLASS_RULE
=
58 HBaseClassTestRule
.forClass(TestNotCleanupCompactedFileWhenRegionWarmup
.class);
60 private static HBaseTestingUtil TEST_UTIL
;
61 private static Admin admin
;
62 private static Table table
;
64 private static TableName TABLE_NAME
= TableName
.valueOf("TestCleanupCompactedFileAfterFailover");
65 private static byte[] ROW
= Bytes
.toBytes("row");
66 private static byte[] FAMILY
= Bytes
.toBytes("cf");
67 private static byte[] QUALIFIER
= Bytes
.toBytes("cq");
68 private static byte[] VALUE
= Bytes
.toBytes("value");
71 public static void beforeClass() throws Exception
{
72 TEST_UTIL
= new HBaseTestingUtil();
73 // Set the scanner lease to 20min, so the scanner can't be closed by RegionServer
74 TEST_UTIL
.getConfiguration().setInt(HConstants
.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD
, 1200000);
75 TEST_UTIL
.getConfiguration()
76 .setInt(CompactionConfiguration
.HBASE_HSTORE_COMPACTION_MIN_KEY
, 100);
77 TEST_UTIL
.getConfiguration().set("dfs.blocksize", "64000");
78 TEST_UTIL
.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
79 TEST_UTIL
.getConfiguration().set(TimeToLiveHFileCleaner
.TTL_CONF_KEY
, "0");
80 TEST_UTIL
.startMiniCluster(1);
81 admin
= TEST_UTIL
.getAdmin();
85 public static void afterClass() throws Exception
{
86 TEST_UTIL
.shutdownMiniCluster();
90 public void before() throws Exception
{
91 TableDescriptorBuilder builder
= TableDescriptorBuilder
.newBuilder(TABLE_NAME
);
92 builder
.setColumnFamily(ColumnFamilyDescriptorBuilder
.of(FAMILY
));
93 admin
.createTable(builder
.build());
94 TEST_UTIL
.waitTableAvailable(TABLE_NAME
);
95 table
= TEST_UTIL
.getConnection().getTable(TABLE_NAME
);
99 public void after() throws Exception
{
100 admin
.disableTable(TABLE_NAME
);
101 admin
.deleteTable(TABLE_NAME
);
105 public void testRegionWarmup() throws Exception
{
106 List
<HRegion
> regions
= new ArrayList
<>();
107 for (JVMClusterUtil
.RegionServerThread rsThread
: TEST_UTIL
.getHBaseCluster()
108 .getLiveRegionServerThreads()) {
109 HRegionServer rs
= rsThread
.getRegionServer();
110 if (rs
.getOnlineTables().contains(TABLE_NAME
)) {
111 regions
.addAll(rs
.getRegions(TABLE_NAME
));
114 assertEquals("Table should only have one region", 1, regions
.size());
115 HRegion region
= regions
.get(0);
116 HStore store
= region
.getStore(FAMILY
);
118 writeDataAndFlush(3, region
);
119 assertEquals(3, store
.getStorefilesCount());
121 // Open a scanner and not close, then the storefile will be referenced
122 store
.getScanner(new Scan(), null, 0);
123 region
.compact(true);
124 assertEquals(1, store
.getStorefilesCount());
125 // The compacted file should not be archived as there are references by user scanner
126 assertEquals(3, store
.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
128 HStore newStore
= region
.instantiateHStore(ColumnFamilyDescriptorBuilder
.of(FAMILY
), true);
129 // Should not archive the compacted storefiles when region warmup
130 assertEquals(4, newStore
.getStorefilesCount());
132 newStore
= region
.instantiateHStore(ColumnFamilyDescriptorBuilder
.of(FAMILY
), false);
133 // Archived the compacted storefiles when region real open
134 assertEquals(1, newStore
.getStorefilesCount());
137 private void writeDataAndFlush(int fileNum
, HRegion region
) throws Exception
{
138 for (int i
= 0; i
< fileNum
; i
++) {
139 for (int j
= 0; j
< 100; j
++) {
140 table
.put(new Put(concat(ROW
, j
)).addColumn(FAMILY
, QUALIFIER
, concat(VALUE
, j
)));
146 private byte[] concat(byte[] base
, int index
) {
147 return Bytes
.toBytes(Bytes
.toString(base
) + "-" + index
);