/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
/**
 * Class to test HBaseHbck. Spins up the minicluster once at test start and then takes it down
 * afterward. Add any testing of HBaseHbck functionality here.
 */
@RunWith(Parameterized.class)
@Category({ LargeTests.class, ClientTests.class })
public class TestHbck {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHbck.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestHbck.class);
  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Rule
  public TestName name = new TestName();

  @SuppressWarnings("checkstyle:VisibilityModifier") @Parameter
  public boolean async;

  private static final TableName TABLE_NAME = TableName.valueOf(TestHbck.class.getSimpleName());

  private static ProcedureExecutor<MasterProcedureEnv> procExec;

  private static AsyncConnection ASYNC_CONN;

  @Parameters(name = "{index}: async={0}")
  public static List<Object[]> params() {
    return Arrays.asList(new Object[] { false }, new Object[] { true });
  }
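
  /**
   * Returns the Hbck instance under test: the one obtained from the shared AsyncConnection when
   * running the async parameterization, otherwise the blocking one from the test utility.
   */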
  private Hbck getHbck() throws Exception {
    if (async) {
      return ASYNC_CONN.getHbck().get();
    } else {
      return TEST_UTIL.getHbck();
    }
  }
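
  /**
   * Starts a three-node minicluster, creates the multi-region test table and registers the two
   * master observers below, which force merge/split procedures to fail after hbase:meta has been
   * updated.
   */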
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
    TEST_UTIL.createMultiRegionTable(TABLE_NAME, Bytes.toBytes("family1"), 5);
    procExec = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
    TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost().load(
      FailingMergeAfterMetaUpdatedMasterObserver.class, Coprocessor.PRIORITY_USER,
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration());
    TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost().load(
      FailingSplitAfterMetaUpdatedMasterObserver.class, Coprocessor.PRIORITY_USER,
      TEST_UTIL.getHBaseCluster().getMaster().getConfiguration());
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    Closeables.close(ASYNC_CONN, true);
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() throws IOException {
    TEST_UTIL.ensureSomeRegionServersAvailable(3);
  }
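
  /**
   * A procedure that suspends itself on every execution, so it never completes until it is
   * bypassed.
   */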
  public static class SuspendProcedure extends
    ProcedureTestingUtility.NoopProcedure<MasterProcedureEnv> implements TableProcedureInterface {
    public SuspendProcedure() {
      super();
    }

    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
      // Always suspend the procedure
      throw new ProcedureSuspendedException();
    }

    @Override
    public TableName getTableName() {
      return TABLE_NAME;
    }

    @Override
    public TableOperationType getTableOperationType() {
      return TableOperationType.READ;
    }
  }
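
  /**
   * Submits a permanently suspended procedure and verifies that Hbck#bypassProcedure completes
   * it.
   */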
  @Test
  public void testBypassProcedure() throws Exception {
    // SuspendProcedure
    final SuspendProcedure proc = new SuspendProcedure();
    long procId = procExec.submitProcedure(proc);
    Thread.sleep(500);

    // bypass the procedure
    List<Long> pids = Arrays.<Long> asList(procId);
    List<Boolean> results = getHbck().bypassProcedure(pids, 30000, false, false);
    assertTrue("Failed to bypass procedure!", results.get(0));
    TEST_UTIL.waitFor(5000, () -> proc.isSuccess() && proc.isBypass());
    LOG.info("{} finished", proc);
  }
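
  /**
   * Flips the table state in hbase:meta and verifies that setTableStateInMeta returns the
   * previous state.
   */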
  @Test
  public void testSetTableStateInMeta() throws Exception {
    Hbck hbck = getHbck();
    // set table state to DISABLED
    hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.DISABLED));
    // Method {@link Hbck#setTableStateInMeta()} returns previous state, which in this case
    // will be DISABLED
    TableState prevState =
      hbck.setTableStateInMeta(new TableState(TABLE_NAME, TableState.State.ENABLED));
    assertTrue("Incorrect previous state! expected=DISABLED, found=" + prevState.getState(),
      prevState.isDisabled());
  }
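
  /**
   * Forces every region of the test table to CLOSED in hbase:meta, checks that the previous
   * states are returned and that the AssignmentManager cache reflects the new states, then
   * restores the original states.
   */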
  @Test
  public void testSetRegionStateInMeta() throws Exception {
    Hbck hbck = getHbck();
    Admin admin = TEST_UTIL.getAdmin();
    final List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
    final AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    Map<String, RegionState.State> prevStates = new HashMap<>();
    Map<String, RegionState.State> newStates = new HashMap<>();
    final Map<String, Pair<RegionState.State, RegionState.State>> regionsMap = new HashMap<>();
    regions.forEach(r -> {
      RegionState prevState = am.getRegionStates().getRegionState(r);
      prevStates.put(r.getEncodedName(), prevState.getState());
      newStates.put(r.getEncodedName(), RegionState.State.CLOSED);
      regionsMap.put(r.getEncodedName(),
        new Pair<>(prevState.getState(), RegionState.State.CLOSED));
    });
    final Map<String, RegionState.State> result = hbck.setRegionStateInMeta(newStates);
    result.forEach((k, v) -> {
      RegionState.State prevState = regionsMap.get(k).getFirst();
      assertEquals(prevState, v);
    });
    regions.forEach(r -> {
      RegionState cachedState = am.getRegionStates().getRegionState(r.getEncodedName());
      RegionState.State newState = regionsMap.get(r.getEncodedName()).getSecond();
      assertEquals(newState, cachedState.getState());
    });
    hbck.setRegionStateInMeta(prevStates);
  }
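
  /**
   * Exercises Hbck#unassigns and Hbck#assigns: reruns without and with the override flag, and a
   * final call with bogus region names.
   */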
  @Test
  public void testAssigns() throws Exception {
    Hbck hbck = getHbck();
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(TABLE_NAME);
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
      }
      List<Long> pids =
        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      waitOnPids(pids);
      // Rerun the unassign. Should fail for all Regions since they are already unassigned; a
      // failed unassign will manifest as all pids being -1 (ever since HBASE-24885).
      pids =
        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      waitOnPids(pids);
      for (long pid : pids) {
        assertEquals(Procedure.NO_PROC_ID, pid);
      }
      // If we pass override, then we should be able to unassign EVEN THOUGH the Regions are
      // already unassigned.... makes for a mess but an operator might want to do this at an
      // extreme when doing fixup of a broken cluster.
      pids =
        hbck.unassigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()),
          true);
      waitOnPids(pids);
      for (long pid : pids) {
        assertNotEquals(Procedure.NO_PROC_ID, pid);
      }
      // Clean up by bypassing all the unassigns we just made so tests can continue.
      hbck.bypassProcedure(pids, 10000, true, true);
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
        assertTrue(rs.toString(), rs.isClosed());
      }
      pids =
        hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      waitOnPids(pids);
      // Rerun the assign. Should fail for all Regions since they are already assigned; a failed
      // assign will manifest as all pids being -1 (ever since HBASE-24885).
      pids =
        hbck.assigns(regions.stream().map(r -> r.getEncodedName()).collect(Collectors.toList()));
      for (long pid : pids) {
        assertEquals(Procedure.NO_PROC_ID, pid);
      }
      for (RegionInfo ri : regions) {
        RegionState rs = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .getRegionStates().getRegionState(ri.getEncodedName());
        LOG.info("RS: {}", rs.toString());
        assertTrue(rs.toString(), rs.isOpened());
      }
      // What happens if a bogus region list is passed?
      pids = hbck.assigns(
        Arrays.stream(new String[] { "a", "some rubbish name" }).collect(Collectors.toList()));
      for (long pid : pids) {
        assertEquals(Procedure.NO_PROC_ID, pid);
      }
    }
  }
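
  /**
   * Schedules a ServerCrashProcedure for a live region server via Hbck and verifies that a
   * second request for the same server yields a negative pid.
   */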
  @Test
  public void testScheduleSCP() throws Exception {
    HRegionServer testRs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    TEST_UTIL.loadTable(TEST_UTIL.getConnection().getTable(TABLE_NAME), Bytes.toBytes("family1"),
      true);
    ServerName serverName = testRs.getServerName();
    Hbck hbck = getHbck();
    List<Long> pids = hbck.scheduleServerCrashProcedures(Arrays.asList(serverName));
    assertTrue(pids.get(0) > 0);
    LOG.info("pid is {}", pids.get(0));

    List<Long> newPids = hbck.scheduleServerCrashProcedures(Arrays.asList(serverName));
    assertTrue(newPids.get(0) < 0);
    LOG.info("pid is {}", newPids.get(0));
    waitOnPids(pids);
  }
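
  /**
   * Triggers the master's HbckChore through Hbck#runHbckChore and verifies that a new checking
   * run has completed.
   */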
  @Test
  public void testRunHbckChore() throws Exception {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    long endTimestamp = master.getHbckChore().getCheckingEndTimestamp();
    Hbck hbck = getHbck();
    boolean ran = false;
    while (!ran) {
      ran = hbck.runHbckChore();
      if (ran) {
        assertTrue(master.getHbckChore().getCheckingEndTimestamp() > endTimestamp);
      }
    }
  }
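
  /**
   * Master observer that fails every split just after hbase:meta has been updated, leaving the
   * split procedure stuck.
   */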
  public static class FailingSplitAfterMetaUpdatedMasterObserver
    implements MasterCoprocessor, MasterObserver {
    @SuppressWarnings("checkstyle:VisibilityModifier") public volatile CountDownLatch latch;

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
      resetLatch();
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    @Override
    public void preSplitRegionAfterMETAAction(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
      LOG.info("I'm here");
      latch.countDown();
      throw new IOException("this procedure will fail at here forever");
    }

    public void resetLatch() {
      this.latch = new CountDownLatch(1);
    }
  }
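
  /**
   * Master observer that fails every merge just after the commit to hbase:meta, leaving the
   * merge procedure stuck.
   */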
  public static class FailingMergeAfterMetaUpdatedMasterObserver
    implements MasterCoprocessor, MasterObserver {
    @SuppressWarnings("checkstyle:VisibilityModifier") public volatile CountDownLatch latch;

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
      resetLatch();
    }

    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this);
    }

    public void resetLatch() {
      this.latch = new CountDownLatch(1);
    }

    @Override
    public void postMergeRegionsCommitAction(
      final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo[] regionsToMerge,
      final RegionInfo mergedRegion) throws IOException {
      latch.countDown();
      throw new IOException("this procedure will fail at here forever");
    }
  }
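
  /** Waits up to one minute for all of the given procedures to finish. */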
  private void waitOnPids(List<Long> pids) {
    TEST_UTIL.waitFor(60000, () -> pids.stream().allMatch(procExec::isFinished));
  }
}