HBASE-26567 Remove IndexType from ChunkCreator (#3947)
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java
blob 16fe4e30dbfb352664b272169ae76e38387f2df5
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
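
/**
 * Verifies that a client scan completes successfully even though the region server keeps
 * throwing retriable IOExceptions from the scanner coprocessor hook.
 */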
@Category({RegionServerTests.class, MediumTests.class})
public class TestScannerRetriableFailure {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestScannerRetriableFailure.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestScannerRetriableFailure.class);

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  private static final String FAMILY_NAME_STR = "f";
  private static final byte[] FAMILY_NAME = Bytes.toBytes(FAMILY_NAME_STR);

  @Rule public TableNameTestRule testTable = new TableNameTestRule();
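
  /**
   * Region observer that throws a plain IOException from preScannerNext on every other
   * invocation against a user table, simulating a retriable scanner failure on the server side.
   */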
  public static class FaultyScannerObserver implements RegionCoprocessor, RegionObserver {
    private int faults = 0;

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e,
        final InternalScanner s, final List<Result> results, final int limit,
        final boolean hasMore) throws IOException {
      final TableName tableName = e.getEnvironment().getRegionInfo().getTable();
      if (!tableName.isSystemTable() && (faults++ % 2) == 0) {
        LOG.debug(" Injecting fault in table=" + tableName + " scanner");
        throw new IOException("injected fault");
      }
      return hasMore;
    }
  }
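
  /**
   * Raise the compaction and blocking-store-file thresholds well above what the test writes, so
   * background compactions should not interfere, and register the fault-injecting observer on
   * every region.
   */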
  private static void setupConf(Configuration conf) {
    conf.setLong("hbase.hstore.compaction.min", 20);
    conf.setLong("hbase.hstore.compaction.max", 39);
    conf.setLong("hbase.hstore.blockingStoreFiles", 40);

    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FaultyScannerObserver.class.getName());
  }
  @BeforeClass
  public static void setup() throws Exception {
    setupConf(UTIL.getConfiguration());
    UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    try {
      UTIL.shutdownMiniCluster();
    } catch (Exception e) {
      LOG.warn("failure shutting down cluster", e);
    }
  }
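
  /**
   * Loads 100 rows and verifies that a full scan still returns every one of them, even though the
   * server-side observer fails every other scanner next() call.
   */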
  @Test
  public void testFaultyScanner() throws Exception {
    TableName tableName = testTable.getTableName();
    Table table = UTIL.createTable(tableName, FAMILY_NAME);
    try {
      final int NUM_ROWS = 100;
      loadTable(table, NUM_ROWS);
      checkTableRows(table, NUM_ROWS);
    } finally {
      table.close();
    }
  }
  // ==========================================================================
  //  Helpers
  // ==========================================================================
  private FileSystem getFileSystem() {
    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
  }

  private Path getRootDir() {
    return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
  }
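
  /** Writes {@code numRows} single-column rows with zero-padded row keys, skipping the WAL. */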
  public void loadTable(final Table table, int numRows) throws IOException {
    List<Put> puts = new ArrayList<>(numRows);
    for (int i = 0; i < numRows; ++i) {
      byte[] row = Bytes.toBytes(String.format("%09d", i));
      Put put = new Put(row);
      put.setDurability(Durability.SKIP_WAL);
      put.addColumn(FAMILY_NAME, null, row);
      table.put(put);
    }
  }
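
  /**
   * Scans the table with caching set to 1 so each row fetch goes back to the server and can hit
   * an injected fault, then asserts that every row comes back in order with nothing extra.
   */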
  private void checkTableRows(final Table table, int numRows) throws Exception {
    Scan scan = new Scan();
    scan.setCaching(1);
    scan.setCacheBlocks(false);
    ResultScanner scanner = table.getScanner(scan);
    try {
      int count = 0;
      for (int i = 0; i < numRows; ++i) {
        byte[] row = Bytes.toBytes(String.format("%09d", i));
        Result result = scanner.next();
        assertTrue(result != null);
        assertTrue(Bytes.equals(row, result.getRow()));
        count++;
      }

      while (true) {
        Result result = scanner.next();
        if (result == null) break;
        count++;
      }
      assertEquals(numRows, count);
    } finally {
      scanner.close();
    }
  }
}