/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

/**
 * Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down
 * afterward. Add any testing of HBaseHbck functionality here.
 */
@RunWith(Parameterized.class)
@Category({ LargeTests.class, ClientTests.class })
public class TestHbck {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @Rule
  public TestName name = new TestName();

  @Parameter
  public boolean async;

  private static final TableName TABLE_NAME = TableName.valueOf(TestHbck.class.getSimpleName());

  private static ProcedureExecutor<MasterProcedureEnv> procExec;

  private static AsyncConnection ASYNC_CONN;

  @Parameters(name = "{index}: async={0}")
  public static List<Object[]> params() {
    return Arrays.asList(new Object[] { false }, new Object[] { true });
  }

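  /**
   * Returns the Hbck implementation under test: the async connection's Hbck when the test runs
   * with async=true, otherwise the blocking Hbck from the testing utility.
   */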
  private Hbck getHbck() throws Exception {
    if (async) {
      return ASYNC_CONN.getHbck().get();
    } else {
      return TEST_UTIL.getHbck();
    }
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
    TEST_UTIL.createMultiRegionTable(TABLE_NAME, Bytes.toBytes("family1"), 5);
    procExec = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
    TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost().load(
      FailingMergeAfterMetaUpdatedMasterObserver.class, Coprocessor.PRIORITY_USER,
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration());
    TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost().load(
      FailingSplitAfterMetaUpdatedMasterObserver.class, Coprocessor.PRIORITY_USER,
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    Closeables.close(ASYNC_CONN, true);
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() throws IOException {
    TEST_UTIL.ensureSomeRegionServersAvailable(3);
  }

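  /**
   * Procedure that suspends itself on every execution, so it never completes on its own. Used to
   * exercise {@link Hbck#bypassProcedure}.
   */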
  public static class SuspendProcedure extends
      ProcedureTestingUtility.NoopProcedure<MasterProcedureEnv> implements TableProcedureInterface {
    public SuspendProcedure() {
      super();
    }

    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
      // Always suspend the procedure
      throw new ProcedureSuspendedException();
    }

    @Override
    public TableName getTableName() {
      return TABLE_NAME;
    }

    @Override
    public TableOperationType getTableOperationType() {
      return TableOperationType.READ;
    }
  }

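  /**
   * Submits a {@link SuspendProcedure}, bypasses it through Hbck, and verifies the procedure then
   * finishes in the bypassed state.
   */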
  @Test
  public void testBypassProcedure() throws Exception {
    final SuspendProcedure proc = new SuspendProcedure();
    long procId = procExec.submitProcedure(proc);

    // bypass the procedure
    List<Long> pids = Arrays.<Long> asList(procId);
    List<Boolean> results = getHbck().bypassProcedure(pids, 30000, false, false);
    assertTrue("Failed to bypass procedure!", results.get(0));
    TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
    LOG.info("{} finished", proc);
  }

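  /**
   * Verifies that setting the table state in hbase:meta through Hbck returns the previous state.
   */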
  @Test
  public void testSetTableStateInMeta() throws Exception {
    Hbck hbck = getHbck();
    // set table state to DISABLED
    hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.DISABLED));
    // Method {@link Hbck#setTableStateInMeta()} returns previous state, which in this case
    // is DISABLED
    TableState prevState =
      hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
    assertTrue("Incorrect previous state! expected=DISABLED, found=" + prevState.getState(),
      prevState.isDisabled());
  }

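  /**
   * Overwrites region states in hbase:meta to CLOSED through Hbck, checks the returned previous
   * states and the AssignmentManager's cached states, then restores the original states.
   */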
  @Test
  public void testSetRegionStateInMeta() throws Exception {
    Hbck hbck = getHbck();
    try (Admin admin = TEST_UTIL.getAdmin()) {
      final List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
      final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
      final List<RegionState> prevStates = new ArrayList<>();
      final List<RegionState> newStates = new ArrayList<>();
      final Map<String, Pair<RegionState, RegionState>> regionsMap = new HashMap<>();
      regions.forEach(r -> {
        RegionState prevState = am.getRegionStates().getRegionState(r);
        prevStates.add(prevState);
        RegionState newState = RegionState.createForTesting(r, RegionState.State.CLOSED);
        newStates.add(newState);
        regionsMap.put(r.getEncodedName(), new Pair<>(prevState, newState));
      });
      final List<RegionState> result = hbck.setRegionStateInMeta(newStates);
      result.forEach(r -> {
        RegionState prevState = regionsMap.get(r.getRegion().getEncodedName()).getFirst();
        assertEquals(prevState.getState(), r.getState());
      });
      regions.forEach(r -> {
        RegionState cachedState = am.getRegionStates().getRegionState(r.getEncodedName());
        RegionState newState = regionsMap.get(r.getEncodedName()).getSecond();
        assertEquals(newState.getState(), cachedState.getState());
      });
      hbck.setRegionStateInMeta(prevStates);
    }
  }

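  /**
   * Unassigns every region of the test table through Hbck and verifies they close, reassigns them
   * and verifies they open, then checks that bogus region names come back as NO_PROC_ID.
   */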
  @Test
  public void testAssigns() throws Exception {
    Hbck hbck = getHbck();
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
      }
      List<Long> pids =
        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      waitOnPids(pids);
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
        assertTrue(rs.toString(), rs.isClosed());
      }
      pids =
        hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      waitOnPids(pids);
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
        assertTrue(rs.toString(), rs.isOpened());
      }
      // What happens if crappy region list passed?
      pids = hbck.assigns(
        Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList()));
      for (long pid : pids) {
        assertEquals(org.apache.hadoop.hbase.procedure2.Procedure.NO_PROC_ID, pid);
      }
    }
  }

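  /**
   * Schedules a ServerCrashProcedure through Hbck for a live server and verifies the first request
   * returns a valid pid while the duplicate request returns a negative pid.
   */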
  @Test
  public void testScheduleSCP() throws Exception {
    HRegionServer testRs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), Bytes.toBytes("family1"),
      true);
    ServerName serverName = testRs.getServerName();
    Hbck hbck = getHbck();
    List<Long> pids =
      hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
    assertTrue(pids.get(0) > 0);
    LOG.info("pid is {}", pids.get(0));

    List<Long> newPids =
      hbck.scheduleServerCrashProcedure(Arrays.asList(ProtobufUtil.toServerName(serverName)));
    assertTrue(newPids.get(0) < 0);
    LOG.info("pid is {}", newPids.get(0));
    waitOnPids(pids);
  }

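  /**
   * Triggers the master's HbckChore through Hbck and verifies its checking-end timestamp advances
   * once a run completes.
   */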
  @Test
  public void testRunHbckChore() throws Exception {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    long endTimestamp = master.getHbckChore().getCheckingEndTimestamp();
    Hbck hbck = getHbck();
    boolean ran = false;
    while (!ran) {
      ran = hbck.runHbckChore();
      if (ran) {
        assertTrue(master.getHbckChore().getCheckingEndTimestamp() > endTimestamp);
      }
    }
  }

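  /**
   * Master observer that counts down its latch and then throws from the
   * preSplitRegionAfterMETAAction hook, so split procedures fail after meta has been updated.
   */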
  public static class FailingSplitAfterMetaUpdatedMasterObserver
    implements MasterCoprocessor, MasterObserver {
    public volatile CountDownLatch latch;

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
      resetLatch();
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    @Override
    public void preSplitRegionAfterMETAAction(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
      LOG.info("I'm here");
      latch.countDown();
      throw new IOException("this procedure will fail at here forever");
    }

    public void resetLatch() {
      this.latch = new CountDownLatch(1);
    }
  }

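  /**
   * Master observer that counts down its latch and then throws from the
   * postMergeRegionsCommitAction hook, so merge procedures fail after meta has been updated.
   */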
  public static class FailingMergeAfterMetaUpdatedMasterObserver
    implements MasterCoprocessor, MasterObserver {
    public volatile CountDownLatch latch;

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
      resetLatch();
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    public void resetLatch() {
      this.latch = new CountDownLatch(1);
    }

    @Override
    public void postMergeRegionsCommitAction(
        final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo[] regionsToMerge,
        final RegionInfo mergedRegion) throws IOException {
      latch.countDown();
      throw new IOException("this procedure will fail at here forever");
    }
  }

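  /** Waits up to one minute for all of the given procedure ids to finish. */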
  private void waitOnPids(List<Long> pids) {
    TEST_UTIL.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
  }
}