/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class tests the scenario where a store refresh happens due to a file not found during scan,
 * after a compaction but before the compacted files are archived. At this state we test for a split
 * and a compaction.
 */
@Category(MediumTests.class)
public class TestCompactionFileNotFound {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCompactionFileNotFound.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestCompactionFileNotFound.class);
  private static final HBaseTestingUtil util = new HBaseTestingUtil();

  private static final TableName TEST_TABLE = TableName.valueOf("test");
  private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");

  private static final byte[] ROW_A = Bytes.toBytes("aaa");
  private static final byte[] ROW_B = Bytes.toBytes("bbb");
  private static final byte[] ROW_C = Bytes.toBytes("ccc");

  private static final byte[] qualifierCol1 = Bytes.toBytes("col1");

  private static final byte[] bytes1 = Bytes.toBytes(1);
  private static final byte[] bytes2 = Bytes.toBytes(2);
  private static final byte[] bytes3 = Bytes.toBytes(3);

  private Table table = null;
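  // "hbase.hfile.compaction.discharger.interval" controls how often the chore that archives
  // compacted store files runs. The tests below rely on the compacted files still being on disk
  // when the store is refreshed, so the setup pushes that interval far out. The exact interval
  // value used below is an assumption (the original was truncated); any sufficiently large value
  // keeps the compacted files around for the duration of the tests.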
  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    Configuration conf = util.getConfiguration();
    // Keep the discharger from archiving compacted files while the tests run.
    conf.setInt("hbase.hfile.compaction.discharger.interval", Integer.MAX_VALUE);
    util.startMiniCluster(3);
  }
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    util.shutdownMiniCluster();
  }
  @After
  public void after() throws Exception {
    try {
      if (table != null) {
        table.close();
      }
    } finally {
      util.deleteTable(TEST_TABLE);
    }
  }
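  // Creates three store files, compacts the table, refreshes the store files before the compacted
  // originals are archived, and then splits the region. The split must go through and no region
  // server may be aborted.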
  @Test
  public void testSplitAfterRefresh() throws Exception {
    Admin admin = util.getAdmin();
    table = util.createTable(TEST_TABLE, TEST_FAMILY);
    try {
      // Create multiple store files
      Put puta = new Put(ROW_A);
      puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
      table.put(puta);
      admin.flush(TEST_TABLE);

      Put putb = new Put(ROW_B);
      putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
      table.put(putb);
      admin.flush(TEST_TABLE);

      Put putc = new Put(ROW_C);
      putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
      table.put(putc);
      admin.flush(TEST_TABLE);

      admin.compact(TEST_TABLE);
      while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
        Thread.sleep(1000);
      }

      HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
        .getRegionByEncodedName(admin.getRegions(TEST_TABLE).get(0).getEncodedName());
      // Refresh store files post compaction, this should not open already compacted files
      hr1.refreshStoreFiles(true);
      int numRegionsBeforeSplit = admin.getRegions(TEST_TABLE).size();
      // Check if we can successfully split after compaction
      admin.splitRegionAsync(admin.getRegions(TEST_TABLE).get(0).getEncodedNameAsBytes(), ROW_C)
        .get();
      util.waitFor(20000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          int numRegionsAfterSplit = 0;
          List<RegionServerThread> rst = util.getMiniHBaseCluster().getLiveRegionServerThreads();
          for (RegionServerThread t : rst) {
            numRegionsAfterSplit += t.getRegionServer().getRegions(TEST_TABLE).size();
          }
          // Make sure that the split went through and all the regions are assigned
          return (numRegionsAfterSplit == numRegionsBeforeSplit + 1
            && admin.isTableAvailable(TEST_TABLE));
        }
      });
      // Split at this point should not result in the RS being aborted
      assertEquals(3, util.getMiniHBaseCluster().getLiveRegionServerThreads().size());
    } finally {
      admin.close();
    }
  }
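  // Same setup as above, but after refreshing the store files the compacted files are archived
  // explicitly and another compaction is attempted. That compaction must not fail with a
  // FileNotFoundException.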
  @Test
  public void testCompactionAfterRefresh() throws Exception {
    Admin admin = util.getAdmin();
    table = util.createTable(TEST_TABLE, TEST_FAMILY);
    try {
      // Create multiple store files
      Put puta = new Put(ROW_A);
      puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
      table.put(puta);
      admin.flush(TEST_TABLE);

      Put putb = new Put(ROW_B);
      putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
      table.put(putb);
      admin.flush(TEST_TABLE);

      Put putc = new Put(ROW_C);
      putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
      table.put(putc);
      admin.flush(TEST_TABLE);

      admin.compact(TEST_TABLE);
      while (admin.getCompactionState(TEST_TABLE) != CompactionState.NONE) {
        Thread.sleep(1000);
      }

      HRegion hr1 = (HRegion) util.getRSForFirstRegionInTable(TEST_TABLE)
        .getRegionByEncodedName(admin.getRegions(TEST_TABLE).get(0).getEncodedName());
      // Refresh store files post compaction, this should not open already compacted files
      hr1.refreshStoreFiles(true);
      // Archive the store files and try another compaction to see if all is good
      for (HStore store : hr1.getStores()) {
        store.closeAndArchiveCompactedFiles();
      }
      try {
        hr1.compact(true);
      } catch (IOException e) {
        LOG.error("Got an exception during compaction", e);
        if (e instanceof FileNotFoundException) {
          Assert.fail("Got a FNFE during compaction");
        }
      }
    } finally {
      admin.close();
    }
  }
}