HBASE-26567 Remove IndexType from ChunkCreator (#3947)
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
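
/**
 * Harness for compaction policy tests: stands up a single-family region backed by an FSHLog,
 * exposes the resulting HStore, and provides helpers for building mock store files and asserting
 * which files a compaction policy selects.
 */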
public class TestCompactionPolicy {
  private final static Logger LOG = LoggerFactory.getLogger(TestCompactionPolicy.class);
  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected Configuration conf;
  protected HStore store;
  private static final String DIR =
    TEST_UTIL.getDataTestDir(TestCompactionPolicy.class.getSimpleName()).toString();
  protected static Path TEST_FILE;
  protected static final int minFiles = 3;
  protected static final int maxFiles = 5;

  protected static final long minSize = 10;
  protected static final long maxSize = 2100;

  private FSHLog hlog;
  private HRegion region;

  @Before
  public void setUp() throws Exception {
    config();
    initialize();
  }

  /**
   * Set up config values necessary for the store.
   */
  protected void config() {
    this.conf = TEST_UTIL.getConfiguration();
    this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
    this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
  }

  /**
   * Setting up a Store.
   * @throws IOException if store setup fails
   */
  protected void initialize() throws IOException {
    Path basedir = new Path(DIR);
    String logName = "logs";
    Path logdir = new Path(DIR, logName);
    ColumnFamilyDescriptor familyDescriptor =
      ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family"));
    FileSystem fs = FileSystem.get(conf);

    fs.delete(logdir, true);

    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(Bytes.toBytes("table")))
        .setColumnFamily(familyDescriptor).build();
    RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();

    hlog = new FSHLog(fs, basedir, logName, conf);
    hlog.init();
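    // The chunk creator backs MemStoreLAB allocation, so it must be initialized before any
    // region (and thus any memstore) is created.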
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
    region.close();
    Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
    region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);

    store = new HStore(region, familyDescriptor, conf, false);

    TEST_FILE = region.getRegionFileSystem().createTempName();
    fs.createNewFile(TEST_FILE);
  }
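
  // Close the region and then the WAL; remember the last exception and rethrow it so
  // cleanup failures still fail the test.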
  @After
  public void tearDown() throws IOException {
    IOException ex = null;
    try {
      region.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    try {
      hlog.close();
    } catch (IOException e) {
      LOG.warn("Caught Exception", e);
      ex = e;
    }
    if (ex != null) {
      throw ex;
    }
  }
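
  // Boxes a primitive varargs array into an ArrayList<Long>.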
  ArrayList<Long> toArrayList(long... numbers) {
    ArrayList<Long> result = new ArrayList<>();
    for (long i : numbers) {
      result.add(i);
    }
    return result;
  }
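
  // Creates one mock store file per requested size, each with an age-in-disk of zero.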
  List<HStoreFile> sfCreate(long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>();
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(toArrayList(sizes), ageInDisk);
  }

  List<HStoreFile> sfCreate(ArrayList<Long> sizes, ArrayList<Long> ageInDisk) throws IOException {
    return sfCreate(false, sizes, ageInDisk);
  }

  List<HStoreFile> sfCreate(boolean isReference, long... sizes) throws IOException {
    ArrayList<Long> ageInDisk = new ArrayList<>(sizes.length);
    for (int i = 0; i < sizes.length; i++) {
      ageInDisk.add(0L);
    }
    return sfCreate(isReference, toArrayList(sizes), ageInDisk);
  }
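
  // Base factory used by the overloads above: builds one MockHStoreFile per size/age pair,
  // optionally marking each file as a reference (i.e. a half-file produced by a region split).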
  List<HStoreFile> sfCreate(boolean isReference, ArrayList<Long> sizes, ArrayList<Long> ageInDisk)
    throws IOException {
    List<HStoreFile> ret = Lists.newArrayList();
    for (int i = 0; i < sizes.size(); i++) {
      ret.add(
        new MockHStoreFile(TEST_UTIL, TEST_FILE, sizes.get(i), ageInDisk.get(i), isReference, i));
    }
    return ret;
  }
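
  // Extracts the reader-reported lengths of the given store files, in order.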
  long[] getSizes(List<HStoreFile> sfList) {
    long[] aNums = new long[sfList.size()];
    for (int i = 0; i < sfList.size(); ++i) {
      aNums[i] = sfList.get(i).getReader().length();
    }
    return aNums;
  }
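
  // Runs the store's compaction policy over the candidate files and asserts that exactly the
  // files with the expected sizes are selected, in order.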
  void compactEquals(List<HStoreFile> candidates, long... expected) throws IOException {
    compactEquals(candidates, false, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, long... expected)
    throws IOException {
    compactEquals(candidates, forcemajor, false, expected);
  }

  void compactEquals(List<HStoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test Default compactions
    CompactionRequestImpl result =
      ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<HStoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
      Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
  }
}