HBASE-26700 The way we bypass broken track file is not enough in StoreFileListFile...
[hbase.git] / hbase-server / src / test / java / org / apache / hadoop / hbase / regionserver / TestParallelPut.java
blob60446ca9503281b9d9501caa64065b5e46445c43
1 /**
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org.apache.hadoop.hbase.regionserver;
20 import static org.apache.hadoop.hbase.HBaseTestingUtil.fam1;
21 import static org.junit.Assert.assertEquals;
22 import static org.junit.Assert.assertTrue;
24 import java.io.IOException;
25 import java.util.Random;
26 import org.apache.hadoop.hbase.Cell;
27 import org.apache.hadoop.hbase.CellUtil;
28 import org.apache.hadoop.hbase.HBaseClassTestRule;
29 import org.apache.hadoop.hbase.HBaseTestingUtil;
30 import org.apache.hadoop.hbase.HConstants;
31 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
32 import org.apache.hadoop.hbase.TableName;
33 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
34 import org.apache.hadoop.hbase.client.Get;
35 import org.apache.hadoop.hbase.client.Put;
36 import org.apache.hadoop.hbase.client.RegionInfo;
37 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
38 import org.apache.hadoop.hbase.client.Result;
39 import org.apache.hadoop.hbase.client.TableDescriptor;
40 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
41 import org.apache.hadoop.hbase.testclassification.MediumTests;
42 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
43 import org.apache.hadoop.hbase.util.Bytes;
44 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
45 import org.junit.After;
46 import org.junit.Before;
47 import org.junit.BeforeClass;
48 import org.junit.ClassRule;
49 import org.junit.Rule;
50 import org.junit.Test;
51 import org.junit.experimental.categories.Category;
52 import org.junit.rules.TestName;
53 import org.slf4j.Logger;
54 import org.slf4j.LoggerFactory;
56 /**
 * Testing of multiPut in parallel.
 */
60 @Category({RegionServerTests.class, MediumTests.class})
61 public class TestParallelPut {
63 @ClassRule
64 public static final HBaseClassTestRule CLASS_RULE =
65 HBaseClassTestRule.forClass(TestParallelPut.class);
67 private static final Logger LOG = LoggerFactory.getLogger(TestParallelPut.class);
68 @Rule public TestName name = new TestName();
70 private HRegion region = null;
71 private static HBaseTestingUtil HBTU = new HBaseTestingUtil();
72 private static final int THREADS100 = 100;
74 // Test names
75 static byte[] tableName;
76 static final byte[] qual1 = Bytes.toBytes("qual1");
77 static final byte[] qual2 = Bytes.toBytes("qual2");
78 static final byte[] qual3 = Bytes.toBytes("qual3");
79 static final byte[] value1 = Bytes.toBytes("value1");
80 static final byte[] value2 = Bytes.toBytes("value2");
81 static final byte [] row = Bytes.toBytes("rowA");
82 static final byte [] row2 = Bytes.toBytes("rowB");
84 @BeforeClass
85 public static void beforeClass() {
86 // Make sure enough handlers.
87 HBTU.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, THREADS100);
91 /**
92 * @see org.apache.hadoop.hbase.HBaseTestCase#setUp()
94 @Before
95 public void setUp() throws Exception {
96 tableName = Bytes.toBytes(name.getMethodName());
99 @After
100 public void tearDown() throws Exception {
101 EnvironmentEdgeManagerTestHelper.reset();
102 if (region != null) {
103 region.close(true);
107 public String getName() {
108 return name.getMethodName();
111 //////////////////////////////////////////////////////////////////////////////
112 // New tests that don't spin up a mini cluster but rather just test the
113 // individual code pieces in the HRegion.
114 //////////////////////////////////////////////////////////////////////////////
117 * Test one put command.
119 @Test
120 public void testPut() throws IOException {
121 LOG.info("Starting testPut");
122 this.region = initHRegion(tableName, getName(), fam1);
124 long value = 1L;
126 Put put = new Put(row);
127 put.addColumn(fam1, qual1, Bytes.toBytes(value));
128 region.put(put);
130 assertGet(this.region, row, fam1, qual1, Bytes.toBytes(value));
134 * Test multi-threaded Puts.
136 @Test
137 public void testParallelPuts() throws IOException {
139 LOG.info("Starting testParallelPuts");
141 this.region = initHRegion(tableName, getName(), fam1);
142 int numOps = 1000; // these many operations per thread
144 // create 100 threads, each will do its own puts
145 Putter[] all = new Putter[THREADS100];
147 // create all threads
148 for (int i = 0; i < THREADS100; i++) {
149 all[i] = new Putter(region, i, numOps);
152 // run all threads
153 for (int i = 0; i < THREADS100; i++) {
154 all[i].start();
157 // wait for all threads to finish
158 for (int i = 0; i < THREADS100; i++) {
159 try {
160 all[i].join();
161 } catch (InterruptedException e) {
162 LOG.warn("testParallelPuts encountered InterruptedException." +
163 " Ignoring....", e);
166 LOG.info("testParallelPuts successfully verified " +
167 (numOps * THREADS100) + " put operations.");
171 private static void assertGet(final HRegion region, byte [] row, byte [] familiy,
172 byte[] qualifier, byte[] value) throws IOException {
173 // run a get and see if the value matches
174 Get get = new Get(row);
175 get.addColumn(familiy, qualifier);
176 Result result = region.get(get);
177 assertEquals(1, result.size());
179 Cell kv = result.rawCells()[0];
180 byte[] r = CellUtil.cloneValue(kv);
181 assertTrue(Bytes.compareTo(r, value) == 0);
184 private HRegion initHRegion(byte [] tableName, String callingMethod, byte[] ... families)
185 throws IOException {
186 TableDescriptorBuilder builder =
187 TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
188 for(byte [] family : families) {
189 builder.setColumnFamily(
190 ColumnFamilyDescriptorBuilder.of(family));
192 TableDescriptor tableDescriptor = builder.build();
193 RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
194 return HBTU.createLocalHRegion(info, tableDescriptor);
198 * A thread that makes a few put calls
200 public static class Putter extends Thread {
202 private final HRegion region;
203 private final int threadNumber;
204 private final int numOps;
205 private final Random rand = new Random();
206 byte [] rowkey = null;
208 public Putter(HRegion region, int threadNumber, int numOps) {
209 this.region = region;
210 this.threadNumber = threadNumber;
211 this.numOps = numOps;
212 this.rowkey = Bytes.toBytes((long)threadNumber); // unique rowid per thread
213 setDaemon(true);
216 @Override
217 public void run() {
218 byte[] value = new byte[100];
219 Put[] in = new Put[1];
221 // iterate for the specified number of operations
222 for (int i=0; i<numOps; i++) {
223 // generate random bytes
224 rand.nextBytes(value);
226 // put the randombytes and verify that we can read it. This is one
227 // way of ensuring that rwcc manipulation in HRegion.put() is fine.
228 Put put = new Put(rowkey);
229 put.addColumn(fam1, qual1, value);
230 in[0] = put;
231 try {
232 OperationStatus[] ret = region.batchMutate(in);
233 assertEquals(1, ret.length);
234 assertEquals(OperationStatusCode.SUCCESS, ret[0].getOperationStatusCode());
235 assertGet(this.region, rowkey, fam1, qual1, value);
236 } catch (IOException e) {
237 assertTrue("Thread id " + threadNumber + " operation " + i + " failed.",
238 false);