/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
public class TestCompactionPolicy {
  private final static Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;
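  /**
   * Runs before each test: applies the compaction tuning config, then builds the region and
   * store fixture.
   */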
  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }
  /**
   * Setup config values necessary for store.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }
  /**
   * Setting up a Store.
   * @throws IOException with error
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    ColumnFamilyDescriptor familyDescriptor =
      ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table")))
        .setColumnFamily(familyDescriptor).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();

    hlog = new FSHLog(fs, basedir, logName, conf);
    hlog.init();
    // Initialize the chunk pool so MemStoreLAB allocations work without a full cluster.
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
    // Reopen the region directly over the table dir so the test controls the store lifecycle.
    region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);

    store = new HStore(region, familyDescriptor, conf, false);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }
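  /**
   * Closes the region and its WAL after each test, remembering the first IOException so a
   * failure in one close does not mask the other.
   */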
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }
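  /** Boxes a varargs array of longs into an ArrayList<Long>. */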
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }
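  /**
   * Creates mock store files of the given sizes for feeding to the compaction policy; age on
   * disk defaults to zero for every file.
   */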
  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }
  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }
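  /** Variant that can mark the mock files as references (half-files produced by a split). */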
  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }
  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
    throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      // The index doubles as the mock file's sequence id.
      ret.add(new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference,
        i));
    }
    return ret;
  }
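  /** Returns the reader-reported lengths of the given store files, in order. */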
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }
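  /**
   * Runs the store's ratio-based compaction policy over the candidate files and asserts that
   * the selected files match the expected sizes, in order.
   */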
  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
    throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }
  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test default compactions
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy()).selectCompaction(
        candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}