/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Objects;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
import org.apache.hadoop.hbase.filter.BigDecimalComparator;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.NullComparator;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation;
import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.regionserver.TestHStore.FaultyFileSystem;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.FaultyFSLog;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKeyImpl;
import org.apache.hadoop.hbase.wal.WALProvider;
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExpectedException;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;

/**
 * Basic stand-alone testing of HRegion. No clusters!
 *
 * A lot of the meta information for an HRegion now lives inside other HRegions
 * or in the HBaseMaster, so only basic testing is possible.
 */
@Category({VerySlowRegionServerTests.class, LargeTests.class})
@SuppressWarnings("deprecation")
public class TestHRegion {
  // Do not spin up clusters in here. If you need to spin up a cluster, do it
  // over in TestHRegionOnCluster.
  private static final Logger LOG = LoggerFactory.getLogger(TestHRegion.class);

  @Rule
  public TestName name = new TestName();
  @ClassRule
  public static final TestRule timeout =
      CategoryBasedTimeout.forClass(TestHRegion.class);
  @Rule public final ExpectedException thrown = ExpectedException.none();

  private static final String COLUMN_FAMILY = "MyCF";
  private static final byte [] COLUMN_FAMILY_BYTES = Bytes.toBytes(COLUMN_FAMILY);

  HRegion region = null;
  // Do not run unit tests in parallel (? Why not? It don't work? Why not? St.Ack)
  protected static HBaseTestingUtility TEST_UTIL;
  public static Configuration CONF;
  private String dir;
  private static FileSystem FILESYSTEM;
  private final int MAX_VERSIONS = 2;

  protected TableName tableName;
  protected String method;
  protected final byte[] qual = Bytes.toBytes("qual");
  protected final byte[] qual1 = Bytes.toBytes("qual1");
  protected final byte[] qual2 = Bytes.toBytes("qual2");
  protected final byte[] qual3 = Bytes.toBytes("qual3");
  protected final byte[] value = Bytes.toBytes("value");
  protected final byte[] value1 = Bytes.toBytes("value1");
  protected final byte[] value2 = Bytes.toBytes("value2");
  protected final byte[] row = Bytes.toBytes("rowA");
  protected final byte[] row2 = Bytes.toBytes("rowB");

  protected final MetricsAssertHelper metricsAssertHelper = CompatibilitySingletonFactory
      .getInstance(MetricsAssertHelper.class);

  @Before
  public void setup() throws IOException {
    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
    FILESYSTEM = TEST_UTIL.getTestFileSystem();
    CONF = TEST_UTIL.getConfiguration();
    dir = TEST_UTIL.getDataTestDir("TestHRegion").toString();
    method = name.getMethodName();
    tableName = TableName.valueOf(method);
  }

  @After
  public void tearDown() throws Exception {
    EnvironmentEdgeManagerTestHelper.reset();
    LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
    TEST_UTIL.cleanupTestDir();
  }

  /**
   * Test that I can use the max flushed sequence id after the close.
   * @throws IOException
   */
  @Test
  public void testSequenceId() throws IOException {
    HRegion region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES);
    assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
    // Weird. This returns 0 if no store files or no edits. Afraid to change it.
    assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
    HBaseTestingUtility.closeRegionAndWAL(region);
    assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
    assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
    // Open region again.
    region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES);
    byte [] value = Bytes.toBytes(method);
    // Make a random put against our cf.
    Put put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, null, value);
    region.put(put);
    // No flush yet so init numbers should still be in place.
    assertEquals(HConstants.NO_SEQNUM, region.getMaxFlushedSeqId());
    assertEquals(0, (long) region.getMaxStoreSeqId().get(COLUMN_FAMILY_BYTES));
    region.flush(true);
    long max = region.getMaxFlushedSeqId();
    HBaseTestingUtility.closeRegionAndWAL(region);
    assertEquals(max, region.getMaxFlushedSeqId());
  }

  /**
   * Test for Bug 2 of HBASE-10466.
   * "Bug 2: Conditions for the first flush of region close (so-called pre-flush) If memstoreSize
   * is smaller than a certain value, or when region close starts a flush is ongoing, the first
   * flush is skipped and only the second flush takes place. However, two flushes are required in
   * case previous flush fails and leaves some data in snapshot. The bug could cause loss of data
   * in current memstore. The fix is removing all conditions except abort check so we ensure 2
   * flushes for region close."
   * @throws IOException
   */
  @Test
  public void testCloseCarryingSnapshot() throws IOException {
    HRegion region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES);
    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
    // Get some random bytes.
    byte [] value = Bytes.toBytes(method);
    // Make a random put against our cf.
    Put put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, null, value);
    // First put something in current memstore, which will be in snapshot after flusher.prepare()
    region.put(put);
    StoreFlushContext storeFlushCtx = store.createFlushContext(12345, FlushLifeCycleTracker.DUMMY);
    storeFlushCtx.prepare();
    // Second put something in current memstore
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
    region.put(put);
    // Close with something in memstore and something in the snapshot. Make sure all is cleared.
    region.close();
    assertEquals(0, region.getMemStoreSize());
    HBaseTestingUtility.closeRegionAndWAL(region);
  }

  /*
   * This test is for verifying memstore snapshot size is correctly updated in case of rollback
   */
  @Test
  public void testMemstoreSnapshotSize() throws IOException {
    class MyFaultyFSLog extends FaultyFSLog {
      StoreFlushContext storeFlushCtx;

      public MyFaultyFSLog(FileSystem fs, Path rootDir, String logName, Configuration conf)
          throws IOException {
        super(fs, rootDir, logName, conf);
      }

      void setStoreFlushCtx(StoreFlushContext storeFlushCtx) {
        this.storeFlushCtx = storeFlushCtx;
      }

      @Override
      public void sync(long txid) throws IOException {
        storeFlushCtx.prepare();
        super.sync(txid);
      }
    }

    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + "testMemstoreSnapshotSize");
    MyFaultyFSLog faultyLog = new MyFaultyFSLog(fs, rootDir, "testMemstoreSnapshotSize", CONF);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, faultyLog,
        COLUMN_FAMILY_BYTES);

    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
    // Get some random bytes.
    byte [] value = Bytes.toBytes(method);
    faultyLog.setStoreFlushCtx(store.createFlushContext(12345, FlushLifeCycleTracker.DUMMY));

    Put put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
    faultyLog.setFailureType(FaultyFSLog.FailureType.SYNC);

    boolean threwIOE = false;
    try {
      region.put(put);
    } catch (IOException ioe) {
      threwIOE = true;
    } finally {
      assertTrue("The regionserver should have thrown an exception", threwIOE);
    }
    long sz = store.getFlushableSize().getDataSize();
    assertTrue("flushable size should be zero, but it is " + sz, sz == 0);
    HBaseTestingUtility.closeRegionAndWAL(region);
  }

  /**
   * Create a WAL outside of the usual helper in
   * {@link HBaseTestingUtility#createWal(Configuration, Path, RegionInfo)} because that method
   * doesn't play nicely with FaultyFileSystem. Call this method before overriding
   * {@code fs.file.impl}.
   * @param callingMethod a unique component for the path, probably the name of the test method.
   */
  private static WAL createWALCompatibleWithFaultyFileSystem(String callingMethod,
      Configuration conf, TableName tableName) throws IOException {
    final Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, logDir);
    return new WALFactory(walConf, Collections.<WALActionsListener> singletonList(new MetricsWAL()),
        callingMethod).getWAL(RegionInfoBuilder.newBuilder(tableName).build());
  }
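
  /**
   * Verifies memstore size accounting when the postBatchMutate coprocessor hook throws: the data
   * from the failed batch should still be reflected in both the region memstore size and the
   * store's flushable size.
   */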
  @Test
  public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOException {
    String testName = "testMemstoreSizeAccountingWithFailedPostBatchMutate";
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + testName);
    FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
        COLUMN_FAMILY_BYTES);
    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
    assertEquals(0, region.getMemStoreSize());

    byte [] value = Bytes.toBytes(method);
    Put put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
    region.put(put);
    long onePutSize = region.getMemStoreSize();
    assertTrue(onePutSize > 0);

    RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
    doThrow(new IOException())
        .when(mockedCPHost).postBatchMutate(Mockito.<MiniBatchOperationInProgress<Mutation>>any());
    region.setCoprocessorHost(mockedCPHost);

    put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("dfg"), value);
    try {
      region.put(put);
      fail("Should have failed with IOException");
    } catch (IOException expected) {
    }
    long expectedSize = onePutSize * 2;
    assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreSize());
    assertEquals("flushable size should be incremented", expectedSize,
        store.getFlushableSize().getDataSize());

    region.setCoprocessorHost(null);
    HBaseTestingUtility.closeRegionAndWAL(region);
  }

  /**
   * Test we do not lose data if we fail a flush and then close.
   * Part of HBase-10466. Tests the following from the issue description:
   * "Bug 1: Wrong calculation of HRegion.memstoreSize: When a flush fails, data to be flushed is
   * kept in each MemStore's snapshot and wait for next flush attempt to continue on it. But when
   * the next flush succeeds, the counter of total memstore size in HRegion is always deduced by
   * the sum of current memstore sizes instead of snapshots left from previous failed flush. This
   * calculation is problematic that almost every time there is failed flush, HRegion.memstoreSize
   * gets reduced by a wrong value. If region flush could not proceed for a couple cycles, the size
   * in current memstore could be much larger than the snapshot. It's likely to drift memstoreSize
   * much smaller than expected. In extreme case, if the error accumulates to even bigger than
   * HRegion's memstore size limit, any further flush is skipped because flush does not do anything
   * if memstoreSize is not larger than 0."
   */
  @Test
  public void testFlushSizeAccounting() throws Exception {
    final Configuration conf = HBaseConfiguration.create(CONF);
    final WAL wal = createWALCompatibleWithFaultyFileSystem(method, conf, tableName);
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    final User user =
        User.createUserForTesting(conf, method, new String []{"foo"});
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        // Make sure it worked (above is sensitive to caching details in hadoop core)
        FileSystem fs = FileSystem.get(conf);
        Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
        FaultyFileSystem ffs = (FaultyFileSystem) fs;
        HRegion region = null;
        try {
          region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal,
              COLUMN_FAMILY_BYTES);
          long size = region.getMemStoreSize();
          Assert.assertEquals(0, size);
          // Put one item into memstore. Measure the size of one item in memstore.
          Put p1 = new Put(row);
          p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
          region.put(p1);
          final long sizeOfOnePut = region.getMemStoreSize();
          // Fail a flush which means the current memstore will hang out as memstore 'snapshot'.
          try {
            LOG.info("Flushing");
            region.flush(true);
            Assert.fail("Didn't bubble up IOE!");
          } catch (DroppedSnapshotException dse) {
            // What we are expecting
            region.closing.set(false); // this is needed for the rest of the test to work
          }
          // Make it so all writes succeed from here on out
          ffs.fault.set(false);
          // Check sizes. Should still be the one entry.
          Assert.assertEquals(sizeOfOnePut, region.getMemStoreSize());
          // Now add two entries so that on this next flush that fails, we can see if we
          // subtract the right amount, the snapshot size only.
          Put p2 = new Put(row);
          p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null));
          p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null));
          region.put(p2);
          long expectedSize = sizeOfOnePut * 3;
          Assert.assertEquals(expectedSize, region.getMemStoreSize());
          // Do a successful flush. It will clear the snapshot only. Thats how flushes work.
          // If already a snapshot, we clear it else we move the memstore to be snapshot and flush
          region.flush(true);
          // Make sure our memory accounting is right.
          Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreSize());
        } finally {
          HBaseTestingUtility.closeRegionAndWAL(region);
        }
        return null;
      }
    });
    FileSystem.closeAllForUGI(user.getUGI());
  }
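
  /**
   * Close on top of a failing flush: with FaultyFileSystem injected, close() is expected to
   * surface a DroppedSnapshotException, after which clearing the fault allows region and WAL to
   * be closed cleanly.
   */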
  @Test
  public void testCloseWithFailingFlush() throws Exception {
    final Configuration conf = HBaseConfiguration.create(CONF);
    final WAL wal = createWALCompatibleWithFaultyFileSystem(method, conf, tableName);
    conf.setInt("hbase.hstore.flush.retries.number", 1);
    final User user =
        User.createUserForTesting(conf, this.method, new String []{"foo"});
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        // Make sure it worked (above is sensitive to caching details in hadoop core)
        FileSystem fs = FileSystem.get(conf);
        Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
        FaultyFileSystem ffs = (FaultyFileSystem) fs;
        HRegion region = null;
        try {
          region = initHRegion(tableName, null, null, false,
              Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES);
          long size = region.getMemStoreSize();
          Assert.assertEquals(0, size);
          // Put one item into memstore. Measure the size of one item in memstore.
          Put p1 = new Put(row);
          p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null));
          region.put(p1);
          // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only.
          HStore store = region.getStore(COLUMN_FAMILY_BYTES);
          StoreFlushContext storeFlushCtx =
              store.createFlushContext(12345, FlushLifeCycleTracker.DUMMY);
          storeFlushCtx.prepare();
          // Now add two entries to the foreground memstore.
          Put p2 = new Put(row);
          p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null));
          p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null));
          region.put(p2);
          // Now try close on top of a failing flush.
          region.close();
          fail();
        } catch (DroppedSnapshotException dse) {
          // Expected
          LOG.info("Expected DroppedSnapshotException");
        } finally {
          // Make it so all writes succeed from here on out so can close clean
          ffs.fault.set(false);
          HBaseTestingUtility.closeRegionAndWAL(region);
        }
        return null;
      }
    });
    FileSystem.closeAllForUGI(user.getUGI());
  }
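
  /**
   * Scanners opened at different points around a delete, flush and major compaction keep their
   * own read points: the scanner opened before the delete still sees the row, later scanners do
   * not.
   */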
  @Test
  public void testCompactionAffectedByScanners() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);

    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    region.put(put);
    region.flush(true);

    Scan scan = new Scan();
    scan.setMaxVersions(3);
    // open the first scanner
    RegionScanner scanner1 = region.getScanner(scan);

    Delete delete = new Delete(Bytes.toBytes("r1"));
    region.delete(delete);
    region.flush(true);

    // open the second scanner
    RegionScanner scanner2 = region.getScanner(scan);

    List<Cell> results = new ArrayList<>();

    System.out.println("Smallest read point:" + region.getSmallestReadPoint());

    // make a major compaction
    region.compact(true);

    // open the third scanner
    RegionScanner scanner3 = region.getScanner(scan);

    // get data from scanner 1, 2, 3 after major compaction
    scanner1.next(results);
    System.out.println(results);
    assertEquals(1, results.size());

    results.clear();
    scanner2.next(results);
    System.out.println(results);
    assertEquals(0, results.size());

    results.clear();
    scanner3.next(results);
    System.out.println(results);
    assertEquals(0, results.size());
  }
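
  /**
   * Reseek on a region scanner right after a major compaction should not throw a
   * NullPointerException and should still land on the expected row ("r2").
   */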
  @Test
  public void testToShowNPEOnRegionScannerReseek() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);

    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    region.put(put);
    put = new Put(Bytes.toBytes("r2"));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    region.put(put);
    region.flush(true);

    Scan scan = new Scan();
    scan.setMaxVersions(3);
    // open the first scanner
    RegionScanner scanner1 = region.getScanner(scan);

    System.out.println("Smallest read point:" + region.getSmallestReadPoint());

    region.compact(true);

    scanner1.reseek(Bytes.toBytes("r2"));
    List<Cell> results = new ArrayList<>();
    scanner1.next(results);
    Cell keyValue = results.get(0);
    Assert.assertTrue(Bytes.compareTo(CellUtil.cloneRow(keyValue), Bytes.toBytes("r2")) == 0);
    scanner1.close();
  }

  @Test
  public void testSkipRecoveredEditsReplay() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

    long maxSeqId = 1050;
    long minSeqId = 1000;

    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);

      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
          .toBytes(i)));
      writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time,
          HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    Get get = new Get(row);
    Result result = region.get(get);
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      assertEquals(1, kvs.size());
      assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
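
  /**
   * Same recovered-edits replay as above, but each store's max sequence id is advanced to 1029,
   * so edits below that point must be skipped while the remainder are replayed.
   */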
  @Test
  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

    long maxSeqId = 1050;
    long minSeqId = 1000;

    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);

      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
          .toBytes(i)));
      writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time,
          HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }
    long recoverSeqId = 1030;
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);
    region.getMVCC().advanceTo(seqId);
    Get get = new Get(row);
    Result result = region.get(get);
    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
      if (i < recoverSeqId) {
        assertEquals(0, kvs.size());
      } else {
        assertEquals(1, kvs.size());
        assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
      }
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }

  @Test
  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    for (int i = 1000; i < 1050; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      FSDataOutputStream dos = fs.create(recoveredEdits);
      dos.close();
    }
    long minSeqId = 2000;
    Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", minSeqId - 1));
    FSDataOutputStream dos = fs.create(recoveredEdits);
    dos.close();

    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null);
    assertEquals(minSeqId, seqId);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
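
  /**
   * Recovered-edits replay where the highest-sequence-id file carries only a compaction marker:
   * replay should still flush the replayed edits, leaving one store file behind.
   */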
  @Test
  public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
    byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]);

    assertEquals(0, region.getStoreFileList(columns).size());

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

    long maxSeqId = 1050;
    long minSeqId = 1000;

    for (long i = minSeqId; i <= maxSeqId; i += 10) {
      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
      fs.create(recoveredEdits);
      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);

      long time = System.nanoTime();
      WALEdit edit = null;
      if (i == maxSeqId) {
        edit = WALEdit.createCompaction(region.getRegionInfo(),
            CompactionDescriptor.newBuilder()
                .setTableName(ByteString.copyFrom(tableName.getName()))
                .setFamilyName(ByteString.copyFrom(regionName))
                .setEncodedRegionName(ByteString.copyFrom(regionName))
                .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
                .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
                .build());
      } else {
        edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
            .toBytes(i)));
      }
      writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, i, time,
          HConstants.DEFAULT_CLUSTER_ID), edit));
      writer.close();
    }

    long recoverSeqId = 1030;
    Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    MonitoredTask status = TaskMonitor.get().createStatus(method);
    for (HStore store : region.getStores()) {
      maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), recoverSeqId - 1);
    }
    long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
    assertEquals(maxSeqId, seqId);

    // assert that the files are flushed
    assertEquals(1, region.getStoreFileList(columns).size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
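
  /**
   * Writes a compaction marker into the recovered edits and verifies that replaying it completes
   * the interrupted compaction; also run with a mismatched encoded region name, which must not
   * prevent the region from reopening.
   */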
  @Test
  public void testRecoveredEditsReplayCompaction() throws Exception {
    testRecoveredEditsReplayCompaction(false);
    testRecoveredEditsReplayCompaction(true);
  }

  public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
    CONF.setClass(HConstants.REGION_IMPL, HRegionForTesting.class, Region.class);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

    long maxSeqId = 3;
    long minSeqId = 0;

    for (long i = minSeqId; i < maxSeqId; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
      region.put(put);
      region.flush(true);
    }

    // this will create a region with 3 files
    assertEquals(3, region.getStore(family).getStorefilesCount());
    List<Path> storeFiles = new ArrayList<>(3);
    for (HStoreFile sf : region.getStore(family).getStorefiles()) {
      storeFiles.add(sf.getPath());
    }

    // disable compaction completion
    CONF.setBoolean("hbase.hstore.compaction.complete", false);
    region.compactStores();

    // ensure that nothing changed
    assertEquals(3, region.getStore(family).getStorefilesCount());

    // now find the compacted file, and manually add it to the recovered edits
    Path tmpDir = new Path(region.getRegionFileSystem().getTempDir(), Bytes.toString(family));
    FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
    String errorMsg = "Expected to find 1 file in the region temp directory "
        + "from the compaction, could not find any";
    assertNotNull(errorMsg, files);
    assertEquals(errorMsg, 1, files.length);
    // move the file inside region dir
    Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family),
        files[0].getPath());

    byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
    byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length];
    for (int i = 0; i < encodedNameAsBytes.length; i++) {
      // Mix the byte array to have a new encodedName
      fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
    }

    CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region
        .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family,
        storeFiles, Lists.newArrayList(newFile),
        region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));

    WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(),
        this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

    Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
    fs.create(recoveredEdits);
    WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);

    long time = System.nanoTime();

    writer.append(new WAL.Entry(new WALKeyImpl(regionName, tableName, 10, time,
        HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(),
        compactionDescriptor)));
    writer.close();

    // close the region now, and reopen again
    region.getTableDescriptor();
    region.getRegionInfo();
    region.close();
    try {
      region = HRegion.openHRegion(region, null);
    } catch (WrongRegionException wre) {
      fail("Matching encoded region name should not have produced WrongRegionException");
    }

    // now check whether we have only one store file, the compacted one
    Collection<HStoreFile> sfs = region.getStore(family).getStorefiles();
    for (HStoreFile sf : sfs) {
      LOG.info(Objects.toString(sf.getPath()));
    }
    if (!mismatchedRegionName) {
      assertEquals(1, region.getStore(family).getStorefilesCount());
    }
    files = FSUtils.listStatus(fs, tmpDir);
    assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);

    for (long i = minSeqId; i < maxSeqId; i++) {
      Get get = new Get(Bytes.toBytes(i));
      Result result = region.get(get);
      byte[] value = result.getValue(family, Bytes.toBytes(i));
      assertArrayEquals(Bytes.toBytes(i), value);
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
    CONF.setClass(HConstants.REGION_IMPL, HRegion.class, Region.class);
  }

  @Test
  public void testFlushMarkers() throws Exception {
    // tests that flush markers are written to WAL and handled at recovered edits
    byte[] family = Bytes.toBytes("family");
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
    FSUtils.setRootDir(walConf, logDir);
    final WALFactory wals = new WALFactory(walConf, null, method);
    final WAL wal = wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build());

    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);
    Path regiondir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

    long maxSeqId = 3;
    long minSeqId = 0;

    for (long i = minSeqId; i < maxSeqId; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
      region.put(put);
      region.flush(true);
    }

    // this will create a region with 3 files from flush
    assertEquals(3, region.getStore(family).getStorefilesCount());
    List<String> storeFiles = new ArrayList<>(3);
    for (HStoreFile sf : region.getStore(family).getStorefiles()) {
      storeFiles.add(sf.getPath().getName());
    }

    // now verify that the flush markers are written
    WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal),
        TEST_UTIL.getConfiguration());

    List<WAL.Entry> flushDescriptors = new ArrayList<>();
    long lastFlushSeqId = -1;
    while (true) {
      WAL.Entry entry = reader.next();
      if (entry == null) {
        break;
      }
      Cell cell = entry.getEdit().getCells().get(0);
      if (WALEdit.isMetaEditFamily(cell)) {
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
        assertNotNull(flushDesc);
        assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
        if (flushDesc.getAction() == FlushAction.START_FLUSH) {
          assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
        } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
          assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
        }
        lastFlushSeqId = flushDesc.getFlushSequenceNumber();
        assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
        assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
        StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
        assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
        assertEquals("family", storeFlushDesc.getStoreHomeDir());
        if (flushDesc.getAction() == FlushAction.START_FLUSH) {
          assertEquals(0, storeFlushDesc.getFlushOutputCount());
        } else {
          assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
          assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
        }

        flushDescriptors.add(entry);
      }
    }

    assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush

    // now write those markers to the recovered edits again.

    Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

    Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
    fs.create(recoveredEdits);
    WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);

    for (WAL.Entry entry : flushDescriptors) {
      writer.append(entry);
    }
    writer.close();

    if (null != reader) {
      try {
        reader.close();
      } catch (IOException exception) {
        LOG.warn("Problem closing wal: " + exception.getMessage());
        LOG.debug("exception details", exception);
      }
    }

    // close the region now, and reopen again
    region.close();
    region = HRegion.openHRegion(region, null);

    // now check whether we can read back the data from region
    for (long i = minSeqId; i < maxSeqId; i++) {
      Get get = new Get(Bytes.toBytes(i));
      Result result = region.get(get);
      byte[] value = result.getValue(family, Bytes.toBytes(i));
      assertArrayEquals(Bytes.toBytes(i), value);
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
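
  /**
   * Mockito ArgumentMatcher that recognizes WALEdits carrying a flush marker whose action is one
   * of the configured FlushActions.
   */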
  static class IsFlushWALMarker implements ArgumentMatcher<WALEdit> {
    volatile FlushAction[] actions;

    public IsFlushWALMarker(FlushAction... actions) {
      this.actions = actions;
    }

    @Override
    public boolean matches(WALEdit edit) {
      List<Cell> cells = edit.getCells();
      if (cells.isEmpty()) {
        return false;
      }
      if (WALEdit.isMetaEditFamily(cells.get(0))) {
        FlushDescriptor desc;
        try {
          desc = WALEdit.getFlushDescriptor(cells.get(0));
        } catch (IOException e) {
          LOG.warn(e.toString(), e);
          return false;
        }
        for (FlushAction action : actions) {
          if (desc.getAction() == action) {
            return true;
          }
        }
      }
      return false;
    }

    public IsFlushWALMarker set(FlushAction... actions) {
      this.actions = actions;
      return this;
    }
  }

  @Test
  public void testFlushMarkersWALFail() throws Exception {
    // test the cases where the WAL append for flush markers fail.
    byte[] family = Bytes.toBytes("family");

    // spy an actual WAL implementation to throw exception (was not able to mock)
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + "log");

    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
    FSUtils.setRootDir(walConf, logDir);
    // Make up a WAL that we can manipulate at append time.
    class FailAppendFlushMarkerWAL extends FSHLog {
      volatile FlushAction[] flushActions = null;

      public FailAppendFlushMarkerWAL(FileSystem fs, Path root, String logDir, Configuration conf)
          throws IOException {
        super(fs, root, logDir, conf);
      }

      @Override
      protected Writer createWriterInstance(Path path) throws IOException {
        final Writer w = super.createWriterInstance(path);
        return new Writer() {
          @Override
          public void close() throws IOException {
            w.close();
          }

          @Override
          public void sync() throws IOException {
            w.sync();
          }

          @Override
          public void append(Entry entry) throws IOException {
            List<Cell> cells = entry.getEdit().getCells();
            if (WALEdit.isMetaEditFamily(cells.get(0))) {
              FlushDescriptor desc = WALEdit.getFlushDescriptor(cells.get(0));
              if (desc != null) {
                for (FlushAction flushAction : flushActions) {
                  if (desc.getAction().equals(flushAction)) {
                    throw new IOException("Failed to append flush marker! " + flushAction);
                  }
                }
              }
            }
            w.append(entry);
          }

          @Override
          public long getLength() {
            return w.getLength();
          }
        };
      }
    }
    FailAppendFlushMarkerWAL wal =
        new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf),
            method, walConf);
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);
    int i = 0;
    Put put = new Put(Bytes.toBytes(i));
    put.setDurability(Durability.SKIP_WAL); // have to skip mocked wal
    put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
    region.put(put);

    // 1. Test case where START_FLUSH throws exception
    wal.flushActions = new FlushAction [] {FlushAction.START_FLUSH};

    // start cache flush will throw exception
    try {
      region.flush(true);
      fail("This should have thrown exception");
    } catch (DroppedSnapshotException unexpected) {
      // this should not be a dropped snapshot exception. Meaning that RS will not abort
      throw unexpected;
    } catch (IOException expected) {
      // expected
    }
    // The WAL is hosed now. It has two edits appended. We cannot roll the log without it
    // throwing a DroppedSnapshotException to force an abort. Just clean up the mess.
    region.close(true);
    wal.close();

    // 2. Test case where START_FLUSH succeeds but COMMIT_FLUSH will throw exception
    wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH};
    wal = new FailAppendFlushMarkerWAL(FileSystem.get(walConf), FSUtils.getRootDir(walConf),
        method, walConf);

    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, Durability.USE_DEFAULT, wal, family);
    region.put(put);

    // 3. Test case where ABORT_FLUSH will throw exception.
    // Even if ABORT_FLUSH throws exception, we should not fail with IOE, but continue with
    // DroppedSnapshotException. Below COMMMIT_FLUSH will cause flush to abort
    wal.flushActions = new FlushAction [] {FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH};

    try {
      region.flush(true);
      fail("This should have thrown exception");
    } catch (DroppedSnapshotException expected) {
      // we expect this exception, since we were able to write the snapshot, but failed to
      // write the flush marker to WAL
    } catch (IOException unexpected) {
      throw unexpected;
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }

  @Test
  public void testGetWhileRegionClose() throws IOException {
    Configuration hc = initSplit();
    int numRows = 100;
    byte[][] families = { fam1, fam2, fam3 };

    // Setting up region
    this.region = initHRegion(tableName, method, hc, families);
    try {
      // Put data in region
      final int startRow = 100;
      putData(startRow, numRows, qual1, families);
      putData(startRow, numRows, qual2, families);
      putData(startRow, numRows, qual3, families);
      final AtomicBoolean done = new AtomicBoolean(false);
      final AtomicInteger gets = new AtomicInteger(0);
      GetTillDoneOrException[] threads = new GetTillDoneOrException[10];
      try {
        // Set ten threads running concurrently getting from the region.
        for (int i = 0; i < threads.length / 2; i++) {
          threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets);
          threads[i].setDaemon(true);
          threads[i].start();
        }
        // Artificially make the condition by setting closing flag explicitly.
        // I can't make the issue happen with a call to region.close().
        this.region.closing.set(true);
        for (int i = threads.length / 2; i < threads.length; i++) {
          threads[i] = new GetTillDoneOrException(i, Bytes.toBytes("" + startRow), done, gets);
          threads[i].setDaemon(true);
          threads[i].start();
        }
      } finally {
        if (this.region != null) {
          HBaseTestingUtility.closeRegionAndWAL(this.region);
        }
      }
      done.set(true);
      for (GetTillDoneOrException t : threads) {
        try {
          t.join();
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
        if (t.e != null) {
          LOG.info("Exception=" + t.e);
          assertFalse("Found a NPE in " + t.getName(), t.e instanceof NullPointerException);
        }
      }
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(this.region);
    }
  }

  /*
   * Thread that does get on single row until 'done' flag is flipped. If an
   * exception causes us to fail, it records it.
   */
  class GetTillDoneOrException extends Thread {
    private final Get g;
    private final AtomicBoolean done;
    private final AtomicInteger count;
    private Exception e;

    GetTillDoneOrException(final int i, final byte[] r, final AtomicBoolean d,
        final AtomicInteger c) {
      super("getter." + i);
      this.g = new Get(r);
      this.done = d;
      this.count = c;
    }

    @Override
    public void run() {
      while (!this.done.get()) {
        try {
          assertTrue(region.get(g).size() > 0);
          this.count.incrementAndGet();
        } catch (Exception e) {
          this.e = e;
          break;
        }
      }
    }
  }

  /*
   * An involved filter test. Has multiple column families and deletes in mix.
   */
  @Test
  public void testWeirdCacheBehaviour() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"), Bytes.toBytes("trans-type"),
        Bytes.toBytes("trans-date"), Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
    this.region = initHRegion(tableName, method, CONF, FAMILIES);
    String value = "this is the value";
    String value2 = "this is some other value";
    String keyPrefix1 = "prefix1";
    String keyPrefix2 = "prefix2";
    String keyPrefix3 = "prefix3";
    putRows(this.region, 3, value, keyPrefix1);
    putRows(this.region, 3, value, keyPrefix2);
    putRows(this.region, 3, value, keyPrefix3);
    putRows(this.region, 3, value2, keyPrefix1);
    putRows(this.region, 3, value2, keyPrefix2);
    putRows(this.region, 3, value2, keyPrefix3);
    System.out.println("Checking values for key: " + keyPrefix1);
    assertEquals("Got back incorrect number of rows from scan", 3,
        getNumberOfRows(keyPrefix1, value2, this.region));
    System.out.println("Checking values for key: " + keyPrefix2);
    assertEquals("Got back incorrect number of rows from scan", 3,
        getNumberOfRows(keyPrefix2, value2, this.region));
    System.out.println("Checking values for key: " + keyPrefix3);
    assertEquals("Got back incorrect number of rows from scan", 3,
        getNumberOfRows(keyPrefix3, value2, this.region));
    deleteColumns(this.region, value2, keyPrefix1);
    deleteColumns(this.region, value2, keyPrefix2);
    deleteColumns(this.region, value2, keyPrefix3);
    System.out.println("Starting important checks.....");
    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1, 0,
        getNumberOfRows(keyPrefix1, value2, this.region));
    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2, 0,
        getNumberOfRows(keyPrefix2, value2, this.region));
    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0,
        getNumberOfRows(keyPrefix3, value2, this.region));

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }

  @Test
  public void testAppendWithReadOnlyTable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    this.region = initHRegion(tableName, method, CONF, true, Bytes.toBytes("somefamily"));
    boolean exceptionCaught = false;
    Append append = new Append(Bytes.toBytes("somerow"));
    append.setDurability(Durability.SKIP_WAL);
    append.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
        Bytes.toBytes("somevalue"));
    try {
      region.append(append);
    } catch (IOException e) {
      exceptionCaught = true;
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(this.region);
    }
    assertTrue(exceptionCaught == true);
  }

  @Test
  public void testIncrWithReadOnlyTable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    this.region = initHRegion(tableName, method, CONF, true, Bytes.toBytes("somefamily"));
    boolean exceptionCaught = false;
    Increment inc = new Increment(Bytes.toBytes("somerow"));
    inc.setDurability(Durability.SKIP_WAL);
    inc.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), 1L);
    try {
      region.increment(inc);
    } catch (IOException e) {
      exceptionCaught = true;
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(this.region);
    }
    assertTrue(exceptionCaught == true);
  }
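
  /**
   * Helper for testWeirdCacheBehaviour: deletes the trans-tags:qual2 column from every row
   * matched by {@link #buildScanner(String, String, HRegion)} and expects exactly three deletes.
   */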
  private void deleteColumns(HRegion r, String value, String keyPrefix) throws IOException {
    InternalScanner scanner = buildScanner(keyPrefix, value, r);
    int count = 0;
    boolean more = false;
    List<Cell> results = new ArrayList<>();
    do {
      more = scanner.next(results);
      if (results != null && !results.isEmpty())
        count++;
      else
        break;
      Delete delete = new Delete(CellUtil.cloneRow(results.get(0)));
      delete.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"));
      r.delete(delete);
      results.clear();
    } while (more);
    assertEquals("Did not perform correct number of deletes", 3, count);
  }

  private int getNumberOfRows(String keyPrefix, String value, HRegion r) throws Exception {
    InternalScanner resultScanner = buildScanner(keyPrefix, value, r);
    int numberOfResults = 0;
    List<Cell> results = new ArrayList<>();
    boolean more = false;
    do {
      more = resultScanner.next(results);
      if (results != null && !results.isEmpty())
        numberOfResults++;
      else
        break;
      for (Cell kv : results) {
        System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(CellUtil.cloneValue(kv)));
      }
      results.clear();
    } while (more);
    return numberOfResults;
  }

  private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
      throws IOException {
    // Defaults FilterList.Operator.MUST_PASS_ALL.
    FilterList allFilters = new FilterList();
    allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
    // Only return rows where this column value exists in the row.
    SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
        Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
    filter.setFilterIfMissing(true);
    allFilters.addFilter(filter);
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("trans-blob"));
    scan.addFamily(Bytes.toBytes("trans-type"));
    scan.addFamily(Bytes.toBytes("trans-date"));
    scan.addFamily(Bytes.toBytes("trans-tags"));
    scan.addFamily(Bytes.toBytes("trans-group"));
    scan.setFilter(allFilters);
    return r.getScanner(scan);
  }

  private void putRows(HRegion r, int numRows, String value, String key) throws IOException {
    for (int i = 0; i < numRows; i++) {
      String row = key + "_" + i/* UUID.randomUUID().toString() */;
      System.out.println(String.format("Saving row: %s, with value %s", row, value));
      Put put = new Put(Bytes.toBytes(row));
      put.setDurability(Durability.SKIP_WAL);
      put.addColumn(Bytes.toBytes("trans-blob"), null, Bytes.toBytes("value for blob"));
      put.addColumn(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
      put.addColumn(Bytes.toBytes("trans-date"), null, Bytes.toBytes("20090921010101999"));
      put.addColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes.toBytes(value));
      put.addColumn(Bytes.toBytes("trans-group"), null, Bytes.toBytes("adhocTransactionGroupId"));
      r.put(put);
    }
  }
  @Test
  public void testFamilyWithAndWithoutColon() throws Exception {
    byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
    this.region = initHRegion(tableName, method, CONF, cf);
    Put p = new Put(tableName.toBytes());
    byte[] cfwithcolon = Bytes.toBytes(COLUMN_FAMILY + ":");
    p.addColumn(cfwithcolon, cfwithcolon, cfwithcolon);
    boolean exception = false;
    try {
      this.region.put(p);
    } catch (NoSuchColumnFamilyException e) {
      exception = true;
    }
    assertTrue(exception);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
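
  // A batch put with no row locks held should complete in a single WAL sync; a batch that
  // includes one bad column family should fail only that mutation and still sync once.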
  @Test
  public void testBatchPut_whileNoRowLocksHeld() throws IOException {
    final Put[] puts = new Put[10];
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = prepareRegionForBachPut(puts, source, false);

    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);

    LOG.info("Next a batch put with one invalid family");
    puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value);
    codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS,
        codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testBatchPut_whileMultipleRowLocksHeld() throws Exception {
    final Put[] puts = new Put[10];
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = prepareRegionForBachPut(puts, source, false);

    puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value);

    LOG.info("batchPut will have to break into four batches to avoid row locks");
    RowLock rowLock1 = region.getRowLock(Bytes.toBytes("row_2"));
    RowLock rowLock2 = region.getRowLock(Bytes.toBytes("row_1"));
    RowLock rowLock3 = region.getRowLock(Bytes.toBytes("row_3"));
    RowLock rowLock4 = region.getRowLock(Bytes.toBytes("row_3"), true);

    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF);
    final AtomicReference<OperationStatus[]> retFromThread = new AtomicReference<>();
    final CountDownLatch startingPuts = new CountDownLatch(1);
    final CountDownLatch startingClose = new CountDownLatch(1);
    TestThread putter = new TestThread(ctx) {
      @Override
      public void doWork() throws IOException {
        startingPuts.countDown();
        retFromThread.set(region.batchMutate(puts));
      }
    };
    LOG.info("...starting put thread while holding locks");
    ctx.addThread(putter);
    ctx.startThreads();

    // Now attempt to close the region from another thread. Prior to HBASE-12565
    // this would cause the in-progress batchMutate operation to fail with an
    // exception because it used to release and re-acquire the close-guard lock
    // between batches. The caller then didn't get status indicating which writes succeeded.
    // We now expect this thread to block until the batchMutate call finishes.
    Thread regionCloseThread = new TestThread(ctx) {
      @Override
      public void doWork() {
        try {
          startingPuts.await();
          // Give some time for the batch mutate to get in.
          // We don't want to race with the mutate
          startingClose.countDown();
          HBaseTestingUtility.closeRegionAndWAL(region);
        } catch (IOException e) {
          throw new RuntimeException(e);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      }
    };
    regionCloseThread.start();

    startingClose.await();
    startingPuts.await();

    LOG.info("...releasing row lock 1, which should let put thread continue");
    rowLock1.release();
    rowLock2.release();
    rowLock3.release();
    waitForCounter(source, "syncTimeNumOps", syncs + 1);

    LOG.info("...joining on put thread");
    regionCloseThread.join();

    OperationStatus[] codes = retFromThread.get();
    for (int i = 0; i < codes.length; i++) {
      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY : OperationStatusCode.SUCCESS,
        codes[i].getOperationStatusCode());
    }
    rowLock4.release();
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
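
  // Polls the WAL metrics source until the named counter reaches the expected value,
  // failing the test after ten seconds.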
  private void waitForCounter(MetricsWALSource source, String metricName, long expectedCount)
      throws InterruptedException {
    long startWait = System.currentTimeMillis();
    long currentCount;
    while ((currentCount = metricsAssertHelper.getCounter(metricName, source)) < expectedCount) {
      if (System.currentTimeMillis() - startWait > 10000) {
        fail(String.format("Timed out waiting for '%s' >= '%s', currentCount=%s", metricName,
          expectedCount, currentCount));
      }
    }
  }
  @Test
  public void testAtomicBatchPut() throws IOException {
    final Put[] puts = new Put[10];
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = prepareRegionForBachPut(puts, source, false);

    // 1. Straight forward case, should succeed
    MutationBatchOperation batchOp = new MutationBatchOperation(region, puts, true,
      HConstants.NO_NONCE, HConstants.NO_NONCE);
    OperationStatus[] codes = this.region.batchMutate(batchOp);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);

    // 2. Failed to get lock
    RowLock lock = region.getRowLock(Bytes.toBytes("row_" + 3));
    // Method {@link HRegion#getRowLock(byte[])} is reentrant. As 'row_3' is locked in this
    // thread, need to run {@link HRegion#batchMutate(HRegion.BatchOperation)} in a different thread
    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(CONF);
    final AtomicReference<IOException> retFromThread = new AtomicReference<>();
    final CountDownLatch finishedPuts = new CountDownLatch(1);
    final MutationBatchOperation finalBatchOp = new MutationBatchOperation(region, puts, true,
      HConstants.NO_NONCE, HConstants.NO_NONCE);
    TestThread putter = new TestThread(ctx) {
      @Override
      public void doWork() throws IOException {
        try {
          region.batchMutate(finalBatchOp);
        } catch (IOException ioe) {
          LOG.error("test failed!", ioe);
          retFromThread.set(ioe);
        }
        finishedPuts.countDown();
      }
    };
    LOG.info("...starting put thread while holding locks");
    ctx.addThread(putter);
    ctx.startThreads();
    LOG.info("...waiting for batch puts while holding locks");
    try {
      finishedPuts.await();
    } catch (InterruptedException e) {
      LOG.error("Interrupted!", e);
    } finally {
      if (lock != null) {
        lock.release();
      }
    }
    assertNotNull(retFromThread.get());
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);

    // 3. Exception thrown in validation
    LOG.info("Next a batch put with one invalid family");
    puts[5].addColumn(Bytes.toBytes("BAD_CF"), qual, value);
    batchOp = new MutationBatchOperation(region, puts, true, HConstants.NO_NONCE,
      HConstants.NO_NONCE);
    thrown.expect(NoSuchColumnFamilyException.class);
    this.region.batchMutate(batchOp);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testBatchPutWithTsSlop() throws Exception {
    // add data with a timestamp that is too recent for range. Ensure assert
    CONF.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000);
    final Put[] puts = new Put[10];
    MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
    long syncs = prepareRegionForBachPut(puts, source, true);

    OperationStatus[] codes = this.region.batchMutate(puts);
    assertEquals(10, codes.length);
    for (int i = 0; i < 10; i++) {
      assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i].getOperationStatusCode());
    }
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * @return syncs initial syncTimeNumOps
   */
  private long prepareRegionForBachPut(final Put[] puts, final MetricsWALSource source,
      boolean slop) throws IOException {
    this.region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES);

    LOG.info("First a batch put with all valid puts");
    for (int i = 0; i < puts.length; i++) {
      puts[i] = slop ? new Put(Bytes.toBytes("row_" + i), Long.MAX_VALUE - 100) :
        new Put(Bytes.toBytes("row_" + i));
      puts[i].addColumn(COLUMN_FAMILY_BYTES, qual, value);
    }

    long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
    metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
    return syncs;
  }
  // ////////////////////////////////////////////////////////////////////////////
  // checkAndMutate tests
  // ////////////////////////////////////////////////////////////////////////////
  @Test
  public void testCheckAndMutate_WithEmptyRowValue() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] emptyVal = new byte[] {};
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting empty data in key
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, emptyVal);

    // checkAndPut with empty value
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), put, true);

    // Putting data in key
    put = new Put(row1);
    put.addColumn(fam1, qf1, val1);

    // checkAndPut with correct value
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), put, true);

    // not empty anymore
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), put, true);

    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), delete, true);

    put = new Put(row1);
    put.addColumn(fam1, qf1, val2);
    // checkAndPut with correct value
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val1), put, true);

    // checkAndDelete with correct value
    delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    delete.addColumn(fam1, qf1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val2), delete, true);

    delete = new Delete(row1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), delete, true);

    // checkAndPut looking for a null value
    put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL, new NullComparator(),
      put, true);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testCheckAndMutate_WithWrongValue() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");
    BigDecimal bd1 = new BigDecimal(Double.MAX_VALUE);
    BigDecimal bd2 = new BigDecimal(Double.MIN_VALUE);

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting data in key
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);

    // checkAndPut with wrong value
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val2), put, true);
    assertEquals(false, res);

    // checkAndDelete with wrong value
    Delete delete = new Delete(row1);
    delete.addFamily(fam1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val2), delete, true);
    assertEquals(false, res);

    // Putting data in key
    put = new Put(row1);
    put.addColumn(fam1, qf1, Bytes.toBytes(bd1));
    region.put(put);

    // checkAndPut with wrong value
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BigDecimalComparator(bd2), put, true);
    assertEquals(false, res);

    // checkAndDelete with wrong value
    delete = new Delete(row1);
    delete.addFamily(fam1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BigDecimalComparator(bd2), delete, true);
    assertEquals(false, res);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testCheckAndMutate_WithCorrectValue() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] val1 = Bytes.toBytes("value1");
    BigDecimal bd1 = new BigDecimal(Double.MIN_VALUE);

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting data in key
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);

    // checkAndPut with correct value
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val1), put, true);
    assertEquals(true, res);

    // checkAndDelete with correct value
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val1), delete, true);
    assertEquals(true, res);

    // Putting data in key
    put = new Put(row1);
    put.addColumn(fam1, qf1, Bytes.toBytes(bd1));
    region.put(put);

    // checkAndPut with correct value
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BigDecimalComparator(bd1), put, true);
    assertEquals(true, res);

    // checkAndDelete with correct value
    delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BigDecimalComparator(bd1), delete, true);
    assertEquals(true, res);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
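
  // Walks checkAndMutate through the LESS, LESS_OR_EQUAL, GREATER and GREATER_OR_EQUAL
  // comparisons, flipping the stored value whenever a check succeeds.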
  @Test
  public void testCheckAndMutate_WithNonEqualCompareOp() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");
    byte[] val3 = Bytes.toBytes("value3");
    byte[] val4 = Bytes.toBytes("value4");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting val3 in key
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val3);
    region.put(put);

    // Test CompareOp.LESS: original = val3, compare with val3, fail
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS,
      new BinaryComparator(val3), put, true);
    assertEquals(false, res);

    // Test CompareOp.LESS: original = val3, compare with val4, fail
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS,
      new BinaryComparator(val4), put, true);
    assertEquals(false, res);

    // Test CompareOp.LESS: original = val3, compare with val2,
    // succeed (now value = val2)
    put = new Put(row1);
    put.addColumn(fam1, qf1, val2);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS,
      new BinaryComparator(val2), put, true);
    assertEquals(true, res);

    // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val3, fail
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL,
      new BinaryComparator(val3), put, true);
    assertEquals(false, res);

    // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val2,
    // succeed (value still = val2)
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL,
      new BinaryComparator(val2), put, true);
    assertEquals(true, res);

    // Test CompareOp.LESS_OR_EQUAL: original = val2, compare with val1,
    // succeed (now value = val3)
    put = new Put(row1);
    put.addColumn(fam1, qf1, val3);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.LESS_OR_EQUAL,
      new BinaryComparator(val1), put, true);
    assertEquals(true, res);

    // Test CompareOp.GREATER: original = val3, compare with val3, fail
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER,
      new BinaryComparator(val3), put, true);
    assertEquals(false, res);

    // Test CompareOp.GREATER: original = val3, compare with val2, fail
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER,
      new BinaryComparator(val2), put, true);
    assertEquals(false, res);

    // Test CompareOp.GREATER: original = val3, compare with val4,
    // succeed (now value = val2)
    put = new Put(row1);
    put.addColumn(fam1, qf1, val2);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER,
      new BinaryComparator(val4), put, true);
    assertEquals(true, res);

    // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val1, fail
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL,
      new BinaryComparator(val1), put, true);
    assertEquals(false, res);

    // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val2,
    // succeed (value still = val2)
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL,
      new BinaryComparator(val2), put, true);
    assertEquals(true, res);

    // Test CompareOp.GREATER_OR_EQUAL: original = val2, compare with val3, succeed
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.GREATER_OR_EQUAL,
      new BinaryComparator(val3), put, true);
    assertEquals(true, res);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testCheckAndPut_ThatPutWasWritten() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");

    byte[][] families = { fam1, fam2 };

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in the key to check
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);

    // Creating put to add
    long ts = System.currentTimeMillis();
    KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2);
    put = new Put(row1);
    put.add(kv);

    // checkAndPut with correct value
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val1), put, true);
    assertEquals(true, res);

    Get get = new Get(row1);
    get.addColumn(fam2, qf1);
    Cell[] actual = region.get(get).rawCells();

    Cell[] expected = { kv };

    assertEquals(expected.length, actual.length);
    for (int i = 0; i < actual.length; i++) {
      assertEquals(expected[i], actual[i]);
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
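
  // A checkAndMutate whose Put targets a different row than the one being checked must be
  // rejected with a DoNotRetryIOException.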
  @Test
  public void testCheckAndPut_wrongRowInPut() throws IOException {
    this.region = initHRegion(tableName, method, CONF, COLUMNS);
    Put put = new Put(row2);
    put.addColumn(fam1, qual1, value1);
    try {
      region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL,
        new BinaryComparator(value2), put, false);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException expected) {
      // expected exception.
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testCheckAndDelete_ThatDeleteWasWritten() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");
    byte[] qf3 = Bytes.toBytes("qualifier3");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");
    byte[] val3 = Bytes.toBytes("value3");
    byte[] emptyVal = new byte[] {};

    byte[][] families = { fam1, fam2 };

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);

    put = new Put(row1);
    put.addColumn(fam1, qf1, val2);
    put.addColumn(fam2, qf1, val3);
    put.addColumn(fam2, qf2, val2);
    put.addColumn(fam2, qf3, val1);
    put.addColumn(fam1, qf3, val1);
    region.put(put);

    // Multi-column delete
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qf1);
    delete.addColumn(fam2, qf1);
    delete.addColumn(fam1, qf3);
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val2), delete, true);
    assertEquals(true, res);

    Get get = new Get(row1);
    get.addColumn(fam1, qf1);
    get.addColumn(fam1, qf3);
    get.addColumn(fam2, qf2);
    Result r = region.get(get);
    assertEquals(2, r.size());
    assertArrayEquals(val1, r.getValue(fam1, qf1));
    assertArrayEquals(val2, r.getValue(fam2, qf2));

    delete = new Delete(row1);
    delete.addFamily(fam2);
    res = region.checkAndMutate(row1, fam2, qf1, CompareOperator.EQUAL,
      new BinaryComparator(emptyVal), delete, true);
    assertEquals(true, res);

    get = new Get(row1);
    r = region.get(get);
    assertEquals(1, r.size());
    assertArrayEquals(val1, r.getValue(fam1, qf1));

    delete = new Delete(row1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOperator.EQUAL,
      new BinaryComparator(val1), delete, true);
    assertEquals(true, res);
    get = new Get(row1);
    r = region.get(get);
    assertEquals(0, r.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  // ////////////////////////////////////////////////////////////////////////////
  // Delete tests
  // ////////////////////////////////////////////////////////////////////////////
  @Test
  public void testDelete_multiDeleteColumn() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qual = Bytes.toBytes("qualifier");
    byte[] value = Bytes.toBytes("value");

    Put put = new Put(row1);
    put.addColumn(fam1, qual, 1, value);
    put.addColumn(fam1, qual, 2, value);

    this.region = initHRegion(tableName, method, CONF, fam1);
    region.put(put);

    // We do support deleting more than 1 'latest' version
    Delete delete = new Delete(row1);
    delete.addColumn(fam1, qual);
    delete.addColumn(fam1, qual);
    region.delete(delete);

    Get get = new Get(row1);
    get.addFamily(fam1);
    Result r = region.get(get);
    assertEquals(0, r.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testDelete_CheckFamily() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] fam3 = Bytes.toBytes("fam3");
    byte[] fam4 = Bytes.toBytes("fam4");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1, fam2, fam3);

    List<Cell> kvs = new ArrayList<>();
    kvs.add(new KeyValue(row1, fam4, null, null));

    // testing existing family
    byte[] family = fam2;
    try {
      NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      deleteMap.put(family, kvs);
      region.delete(deleteMap, Durability.SYNC_WAL);
    } catch (Exception e) {
      fail("Family " + new String(family, StandardCharsets.UTF_8) + " does not exist");
    }

    // testing non existing family
    boolean ok = false;
    family = fam4;
    try {
      NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      deleteMap.put(family, kvs);
      region.delete(deleteMap, Durability.SYNC_WAL);
    } catch (Exception e) {
      ok = true;
    }
    assertEquals("Family " + new String(family, StandardCharsets.UTF_8) + " does exist",
      true, ok);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testDelete_mixed() throws IOException, InterruptedException {
    byte[] fam = Bytes.toBytes("info");
    byte[][] families = { fam };
    this.region = initHRegion(tableName, method, CONF, families);
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

    byte[] row = Bytes.toBytes("table_name");
    byte[] serverinfo = Bytes.toBytes("serverinfo");
    byte[] splitA = Bytes.toBytes("splitA");
    byte[] splitB = Bytes.toBytes("splitB");

    Put put = new Put(row);
    put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
    region.put(put);

    put = new Put(row);
    put.addColumn(fam, splitB, Bytes.toBytes("reference_B"));
    region.put(put);

    put = new Put(row);
    put.addColumn(fam, serverinfo, Bytes.toBytes("ip_address"));
    region.put(put);

    // ok now delete a split:
    Delete delete = new Delete(row);
    delete.addColumns(fam, splitA);
    region.delete(delete);

    // assert some things:
    Get get = new Get(row).addColumn(fam, serverinfo);
    Result result = region.get(get);
    assertEquals(1, result.size());

    get = new Get(row).addColumn(fam, splitA);
    result = region.get(get);
    assertEquals(0, result.size());

    get = new Get(row).addColumn(fam, splitB);
    result = region.get(get);
    assertEquals(1, result.size());

    // Assert that after a delete, I can put.
    put = new Put(row);
    put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
    region.put(put);
    get = new Get(row);
    result = region.get(get);
    assertEquals(3, result.size());

    // Now delete all... then test I can add stuff back
    delete = new Delete(row);
    region.delete(delete);
    assertEquals(0, region.get(get).size());

    region.put(new Put(row).addColumn(fam, splitA, Bytes.toBytes("reference_A")));
    result = region.get(get);
    assertEquals(1, result.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
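
  // A plain row delete is stamped with the current time, so a cell written with a far-future
  // timestamp survives it and is only removed by a delete carrying a later timestamp.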
  @Test
  public void testDeleteRowWithFutureTs() throws IOException {
    byte[] fam = Bytes.toBytes("info");
    byte[][] families = { fam };
    this.region = initHRegion(tableName, method, CONF, families);

    byte[] row = Bytes.toBytes("table_name");
    byte[] serverinfo = Bytes.toBytes("serverinfo");

    // add data in the far future
    Put put = new Put(row);
    put.addColumn(fam, serverinfo, HConstants.LATEST_TIMESTAMP - 5, Bytes.toBytes("value"));
    region.put(put);

    // now delete something in the present
    Delete delete = new Delete(row);
    region.delete(delete);

    // make sure we still see our data
    Get get = new Get(row).addColumn(fam, serverinfo);
    Result result = region.get(get);
    assertEquals(1, result.size());

    // delete the future row
    delete = new Delete(row, HConstants.LATEST_TIMESTAMP - 3);
    region.delete(delete);

    // make sure it is gone
    get = new Get(row).addColumn(fam, serverinfo);
    result = region.get(get);
    assertEquals(0, result.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * Tests that the special LATEST_TIMESTAMP option for puts gets replaced by
   * the actual timestamp
   */
  @Test
  public void testPutWithLatestTS() throws IOException {
    byte[] fam = Bytes.toBytes("info");
    byte[][] families = { fam };
    this.region = initHRegion(tableName, method, CONF, families);

    byte[] row = Bytes.toBytes("row1");
    byte[] qual = Bytes.toBytes("qual");

    // add data with LATEST_TIMESTAMP, put without WAL
    Put put = new Put(row);
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
    put.setDurability(Durability.SKIP_WAL);
    region.put(put);

    // Make sure it shows up with an actual timestamp
    Get get = new Get(row).addColumn(fam, qual);
    Result result = region.get(get);
    assertEquals(1, result.size());
    Cell kv = result.rawCells()[0];
    LOG.info("Got: " + kv);
    assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
      kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);

    // Check same with WAL enabled (historically these took different
    // code paths, so check both)
    row = Bytes.toBytes("row2");
    put = new Put(row);
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
    region.put(put);

    // Make sure it shows up with an actual timestamp
    get = new Get(row).addColumn(fam, qual);
    result = region.get(get);
    assertEquals(1, result.size());
    kv = result.rawCells()[0];
    LOG.info("Got: " + kv);
    assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
      kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * Tests that there is server-side filtering for invalid timestamp upper
   * bound. Note that the timestamp lower bound is automatically handled for us
   */
  @Test
  public void testPutWithTsSlop() throws IOException {
    byte[] fam = Bytes.toBytes("info");
    byte[][] families = { fam };

    // add data with a timestamp that is too recent for range. Ensure assert
    CONF.setInt("hbase.hregion.keyvalue.timestamp.slop.millisecs", 1000);
    this.region = initHRegion(tableName, method, CONF, families);
    boolean caughtExcep = false;
    try {
      // no TS specified == use latest. should not error
      region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("value")));
      // TS out of range. should error
      region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"),
        System.currentTimeMillis() + 2000, Bytes.toBytes("value")));
      fail("Expected IOE for TS out of configured timerange");
    } catch (FailedSanityCheckException ioe) {
      LOG.debug("Received expected exception", ioe);
      caughtExcep = true;
    }
    assertTrue("Should catch FailedSanityCheckException", caughtExcep);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testScanner_DeleteOneFamilyNotAnother() throws IOException {
    byte[] fam1 = Bytes.toBytes("columnA");
    byte[] fam2 = Bytes.toBytes("columnB");
    this.region = initHRegion(tableName, method, CONF, fam1, fam2);

    byte[] rowA = Bytes.toBytes("rowA");
    byte[] rowB = Bytes.toBytes("rowB");

    byte[] value = Bytes.toBytes("value");

    Delete delete = new Delete(rowA);
    delete.addFamily(fam1);
    region.delete(delete);

    Put put = new Put(rowA);
    put.addColumn(fam2, null, value);
    region.put(put);

    put = new Put(rowB);
    put.addColumn(fam1, null, value);
    put.addColumn(fam2, null, value);
    region.put(put);

    Scan scan = new Scan();
    scan.addFamily(fam1).addFamily(fam2);
    InternalScanner s = region.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    s.next(results);
    assertTrue(CellUtil.matchingRows(results.get(0), rowA));

    results.clear();
    s.next(results);
    assertTrue(CellUtil.matchingRows(results.get(0), rowB));

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
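
  // Mutations a coprocessor adds in preBatchMutate take on the durability of the triggering
  // put, so only batches whose original put is SKIP_WAL grow dataInMemoryWithoutWAL.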
  @Test
  public void testDataInMemoryWithoutWAL() throws IOException {
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + "testDataInMemoryWithoutWAL");
    FSHLog hLog = new FSHLog(fs, rootDir, "testDataInMemoryWithoutWAL", CONF);
    // This chunk creation is done throughout the code base. Do we want to move it into core?
    // It is missing from this test. W/o it we NPE.
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
      COLUMN_FAMILY_BYTES);

    Cell originalCell = CellUtil.createCell(row, COLUMN_FAMILY_BYTES, qual1,
      System.currentTimeMillis(), KeyValue.Type.Put.getCode(), value1);
    final long originalSize = KeyValueUtil.length(originalCell);

    Cell addCell = CellUtil.createCell(row, COLUMN_FAMILY_BYTES, qual1,
      System.currentTimeMillis(), KeyValue.Type.Put.getCode(), Bytes.toBytes("xxxxxxxxxx"));
    final long addSize = KeyValueUtil.length(addCell);

    LOG.info("originalSize:" + originalSize + ", addSize:" + addSize);
    // start test. We expect that the addPut's durability will be replaced
    // by originalPut's durability.

    testDataInMemoryWithoutWAL(region,
      new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL),
      new Put(row).add(addCell).setDurability(Durability.SKIP_WAL),
      originalSize + addSize);

    testDataInMemoryWithoutWAL(region,
      new Put(row).add(originalCell).setDurability(Durability.SKIP_WAL),
      new Put(row).add(addCell).setDurability(Durability.SYNC_WAL),
      originalSize + addSize);

    testDataInMemoryWithoutWAL(region,
      new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL),
      new Put(row).add(addCell).setDurability(Durability.SKIP_WAL),
      0);

    testDataInMemoryWithoutWAL(region,
      new Put(row).add(originalCell).setDurability(Durability.SYNC_WAL),
      new Put(row).add(addCell).setDurability(Durability.SYNC_WAL),
      0);
  }
  private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut,
      final Put addPut, long delta) throws IOException {
    final long initSize = region.getDataInMemoryWithoutWAL();
    // save normalCPHost and replace it with mockedCPHost
    RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
    RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
    // Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must
    // do below format (from Mockito doc).
    Mockito.doAnswer(new Answer() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        MiniBatchOperationInProgress<Mutation> mb = invocation.getArgument(0);
        mb.addOperationsFromCP(0, new Mutation[]{ addPut });
        return null;
      }
    }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class));
    region.setCoprocessorHost(mockedCPHost);
    region.put(originalPut);
    region.setCoprocessorHost(normalCPHost);
    final long finalSize = region.getDataInMemoryWithoutWAL();
    assertEquals("finalSize:" + finalSize + ", initSize:" + initSize + ", delta:" + delta,
      finalSize, initSize + delta);
  }
  @Test
  public void testDeleteColumns_PostInsert() throws IOException, InterruptedException {
    Delete delete = new Delete(row);
    delete.addColumns(fam1, qual1);
    doTestDelete_AndPostInsert(delete);
  }

  @Test
  public void testaddFamily_PostInsert() throws IOException, InterruptedException {
    Delete delete = new Delete(row);
    delete.addFamily(fam1);
    doTestDelete_AndPostInsert(delete);
  }

  public void doTestDelete_AndPostInsert(Delete delete) throws IOException, InterruptedException {
    this.region = initHRegion(tableName, method, CONF, fam1);
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
    Put put = new Put(row);
    put.addColumn(fam1, qual1, value1);
    region.put(put);

    // now delete the value:
    region.delete(delete);

    put = new Put(row);
    put.addColumn(fam1, qual1, value2);
    region.put(put);

    Get get = new Get(row);
    get.addColumn(fam1, qual1);

    Result r = region.get(get);
    assertEquals(1, r.size());
    assertArrayEquals(value2, r.getValue(fam1, qual1));

    Scan scan = new Scan(row);
    scan.addColumn(fam1, qual1);
    InternalScanner s = region.getScanner(scan);

    List<Cell> results = new ArrayList<>();
    assertEquals(false, s.next(results));
    assertEquals(1, results.size());
    Cell kv = results.get(0);

    assertArrayEquals(value2, CellUtil.cloneValue(kv));
    assertArrayEquals(fam1, CellUtil.cloneFamily(kv));
    assertArrayEquals(qual1, CellUtil.cloneQualifier(kv));
    assertArrayEquals(row, CellUtil.cloneRow(kv));

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testDelete_CheckTimestampUpdated() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Building checkerList
    List<Cell> kvs = new ArrayList<>();
    kvs.add(new KeyValue(row1, fam1, col1, null));
    kvs.add(new KeyValue(row1, fam1, col2, null));
    kvs.add(new KeyValue(row1, fam1, col3, null));

    NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    deleteMap.put(fam1, kvs);
    region.delete(deleteMap, Durability.SYNC_WAL);

    // extract the key values out of the memstore:
    // This is kinda hacky, but better than nothing...
    long now = System.currentTimeMillis();
    AbstractMemStore memstore = (AbstractMemStore) region.getStore(fam1).memstore;
    Cell firstCell = memstore.getActive().first();
    assertTrue(firstCell.getTimestamp() <= now);
    now = firstCell.getTimestamp();
    for (Cell cell : memstore.getActive().getCellSet()) {
      assertTrue(cell.getTimestamp() <= now);
      now = cell.getTimestamp();
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  // ////////////////////////////////////////////////////////////////////////////
  // Get tests
  // ////////////////////////////////////////////////////////////////////////////
  @Test
  public void testGet_FamilyChecker() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("False");
    byte[] col1 = Bytes.toBytes("col1");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    Get get = new Get(row1);
    get.addColumn(fam2, col1);
    try {
      region.get(get);
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
      // expected exception.
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testGet_Basic() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");
    byte[] col4 = Bytes.toBytes("col4");
    byte[] col5 = Bytes.toBytes("col5");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    Put put = new Put(row1);
    put.addColumn(fam1, col1, null);
    put.addColumn(fam1, col2, null);
    put.addColumn(fam1, col3, null);
    put.addColumn(fam1, col4, null);
    put.addColumn(fam1, col5, null);
    region.put(put);

    Get get = new Get(row1);
    get.addColumn(fam1, col2);
    get.addColumn(fam1, col4);

    KeyValue kv1 = new KeyValue(row1, fam1, col2);
    KeyValue kv2 = new KeyValue(row1, fam1, col4);
    KeyValue[] expected = { kv1, kv2 };

    Result res = region.get(get);
    assertEquals(expected.length, res.size());
    for (int i = 0; i < res.size(); i++) {
      assertTrue(CellUtil.matchingRows(expected[i], res.rawCells()[i]));
      assertTrue(CellUtil.matchingFamily(expected[i], res.rawCells()[i]));
      assertTrue(CellUtil.matchingQualifier(expected[i], res.rawCells()[i]));
    }

    // Test using a filter on a Get
    Get g = new Get(row1);
    final int count = 2;
    g.setFilter(new ColumnCountGetFilter(count));
    res = region.get(g);
    assertEquals(count, res.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testGet_Empty() throws IOException {
    byte[] row = Bytes.toBytes("row");
    byte[] fam = Bytes.toBytes("fam");

    this.region = initHRegion(tableName, method, CONF, fam);

    Get get = new Get(row);
    Result r = region.get(get);

    assertTrue(r.isEmpty());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
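
  // A filtered Get must not resurrect versions beyond the family's max versions, both while
  // the data sits in the memstore and again after a flush and compaction.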
  @Test
  public void testGetWithFilter() throws IOException, InterruptedException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] value1 = Bytes.toBytes("value1");
    byte[] value2 = Bytes.toBytes("value2");

    final int maxVersions = 3;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    hcd.setMaxVersions(maxVersions);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testFilterAndColumnTracker"));
    htd.addFamily(hcd);
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
    final WAL wal = HBaseTestingUtility.createWal(TEST_UTIL.getConfiguration(), logDir, info);
    this.region = TEST_UTIL.createLocalHRegion(info, htd, wal);

    // Put 4 versions to memstore
    long ts = 0;
    Put put = new Put(row1, ts);
    put.addColumn(fam1, col1, value1);
    region.put(put);
    put = new Put(row1, ts + 1);
    put.addColumn(fam1, col1, Bytes.toBytes("filter1"));
    region.put(put);
    put = new Put(row1, ts + 2);
    put.addColumn(fam1, col1, Bytes.toBytes("filter2"));
    region.put(put);
    put = new Put(row1, ts + 3);
    put.addColumn(fam1, col1, value2);
    region.put(put);

    Get get = new Get(row1);
    get.setMaxVersions();
    Result res = region.get(get);
    // Get 3 versions, the oldest version has gone from user view
    assertEquals(maxVersions, res.size());

    get.setFilter(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("value")));
    res = region.get(get);
    // When using a value filter, the oldest version should still be gone from the user view
    // and it should only return one key value
    assertEquals(1, res.size());
    assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));
    assertEquals(ts + 3, res.rawCells()[0].getTimestamp());

    region.flush(true);
    region.compact(true);

    res = region.get(get);
    // After flush and compact, the result should be consistent with previous result
    assertEquals(1, res.size());
    assertTrue(CellUtil.matchingValue(new KeyValue(row1, fam1, col1, value2), res.rawCells()[0]));

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  // ////////////////////////////////////////////////////////////////////////////
  // Scanner tests
  // ////////////////////////////////////////////////////////////////////////////
  @Test
  public void testGetScanner_WithOkFamilies() throws IOException {
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");

    byte[][] families = { fam1, fam2 };

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);
    Scan scan = new Scan();
    scan.addFamily(fam1);
    scan.addFamily(fam2);
    try {
      region.getScanner(scan);
    } catch (Exception e) {
      assertTrue("Families could not be found in Region", false);
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testGetScanner_WithNotOkFamilies() throws IOException {
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");

    byte[][] families = { fam1 };

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);
    Scan scan = new Scan();
    scan.addFamily(fam2);
    boolean ok = false;
    try {
      region.getScanner(scan);
    } catch (Exception e) {
      ok = true;
    }
    assertTrue("Families could not be found in Region", ok);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testGetScanner_WithNoFamilies() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] fam3 = Bytes.toBytes("fam3");
    byte[] fam4 = Bytes.toBytes("fam4");

    byte[][] families = { fam1, fam2, fam3, fam4 };

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    Put put = new Put(row1);
    put.addColumn(fam1, null, null);
    put.addColumn(fam2, null, null);
    put.addColumn(fam3, null, null);
    put.addColumn(fam4, null, null);
    region.put(put);

    HRegion.RegionScannerImpl is = null;

    // Testing to see how many scanners are produced by getScanner,
    // with known number, 2 - current = 1
    Scan scan = new Scan();
    scan.addFamily(fam2);
    scan.addFamily(fam4);
    is = region.getScanner(scan);
    assertEquals(1, is.storeHeap.getHeap().size());

    scan = new Scan();
    is = region.getScanner(scan);
    assertEquals(families.length - 1, is.storeHeap.getHeap().size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * This method tests https://issues.apache.org/jira/browse/HBASE-2516.
   * @throws IOException
   */
  @Test
  public void testGetScanner_WithRegionClosed() throws IOException {
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");

    byte[][] families = { fam1, fam2 };

    // Setting up region
    try {
      this.region = initHRegion(tableName, method, CONF, families);
    } catch (IOException e) {
      e.printStackTrace();
      fail("Got IOException during initHRegion, " + e.getMessage());
    }
    region.closed.set(true);
    try {
      region.getScanner(null);
      fail("Expected to get an exception during getScanner on a region that is closed");
    } catch (NotServingRegionException e) {
      // this is the correct exception that is expected
    } catch (IOException e) {
      fail("Got wrong type of exception - should be a NotServingRegionException, " +
        "but was an IOException: " + e.getMessage());
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testRegionScanner_Next() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] fam3 = Bytes.toBytes("fam3");
    byte[] fam4 = Bytes.toBytes("fam4");

    byte[][] families = { fam1, fam2, fam3, fam4 };
    long ts = System.currentTimeMillis();

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    Put put = new Put(row1);
    put.addColumn(fam1, (byte[]) null, ts, null);
    put.addColumn(fam2, (byte[]) null, ts, null);
    put.addColumn(fam3, (byte[]) null, ts, null);
    put.addColumn(fam4, (byte[]) null, ts, null);
    region.put(put);

    put = new Put(row2);
    put.addColumn(fam1, (byte[]) null, ts, null);
    put.addColumn(fam2, (byte[]) null, ts, null);
    put.addColumn(fam3, (byte[]) null, ts, null);
    put.addColumn(fam4, (byte[]) null, ts, null);
    region.put(put);

    Scan scan = new Scan();
    scan.addFamily(fam2);
    scan.addFamily(fam4);
    InternalScanner is = region.getScanner(scan);

    List<Cell> res = null;

    List<Cell> expected1 = new ArrayList<>();
    expected1.add(new KeyValue(row1, fam2, null, ts, KeyValue.Type.Put, null));
    expected1.add(new KeyValue(row1, fam4, null, ts, KeyValue.Type.Put, null));

    res = new ArrayList<>();
    is.next(res);
    for (int i = 0; i < res.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected1.get(i), res.get(i)));
    }

    List<Cell> expected2 = new ArrayList<>();
    expected2.add(new KeyValue(row2, fam2, null, ts, KeyValue.Type.Put, null));
    expected2.add(new KeyValue(row2, fam4, null, ts, KeyValue.Type.Put, null));

    res = new ArrayList<>();
    is.next(res);
    for (int i = 0; i < res.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected2.get(i), res.get(i)));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testScanner_ExplicitColumns_FromMemStore_EnforceVersions() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[][] families = { fam1 };

    long ts1 = System.currentTimeMillis();
    long ts2 = ts1 + 1;
    long ts3 = ts1 + 2;

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);

    List<Cell> expected = new ArrayList<>();
    expected.add(kv13);
    expected.add(kv12);

    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertEquals(expected.get(i), actual.get(i));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[][] families = { fam1 };

    long ts1 = 1; // System.currentTimeMillis();
    long ts2 = ts1 + 1;
    long ts3 = ts1 + 2;

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);
    region.flush(true);

    List<Cell> expected = new ArrayList<>();
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv23);
    expected.add(kv22);

    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1);
    scan.addColumn(fam1, qf2);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions() throws
      IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[][] families = { fam1 };
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    KeyValue kv14 = new KeyValue(row1, fam1, qf1, ts4, KeyValue.Type.Put, null);
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv24 = new KeyValue(row1, fam1, qf2, ts4, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    Put put = new Put(row1);
    put.add(kv14);
    put.add(kv24);
    region.put(put);
    region.flush(true);

    put = new Put(row1);
    put.add(kv13);
    put.add(kv23);
    region.put(put);
    region.flush(true);

    put = new Put(row1);
    put.add(kv12);
    put.add(kv22);
    region.put(put);

    put = new Put(row1);
    put.add(kv11);
    put.add(kv21);
    region.put(put);

    List<Cell> expected = new ArrayList<>();

    Scan scan = new Scan(row1);
    scan.addColumn(fam1, qf1);
    scan.addColumn(fam1, qf2);
    scan.setMaxVersions(versions);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
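
  // The wildcard (whole-family) scans below repeat the version-enforcement checks without
  // naming explicit columns.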
  @Test
  public void testScanner_Wildcard_FromMemStore_EnforceVersions() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[][] families = { fam1 };

    long ts1 = System.currentTimeMillis();
    long ts2 = ts1 + 1;
    long ts3 = ts1 + 2;

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, families);

    // Putting data in Region
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);

    List<Cell> expected = new ArrayList<>();
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv23);
    expected.add(kv22);

    Scan scan = new Scan(row1);
    scan.addFamily(fam1);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertEquals(expected.get(i), actual.get(i));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  @Test
  public void testScanner_Wildcard_FromFilesOnly_EnforceVersions() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("qualifier2");
    byte[] fam1 = Bytes.toBytes("fam1");

    long ts1 = 1; // System.currentTimeMillis();
    long ts2 = ts1 + 1;
    long ts3 = ts1 + 2;

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting data in Region
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    Put put = new Put(row1);
    put.add(kv13);
    put.add(kv12);
    put.add(kv11);
    put.add(kv23);
    put.add(kv22);
    put.add(kv21);
    region.put(put);
    region.flush(true);

    List<Cell> expected = new ArrayList<>();
    expected.add(kv13);
    expected.add(kv12);
    expected.add(kv23);
    expected.add(kv22);

    Scan scan = new Scan(row1);
    scan.addFamily(fam1);
    scan.setMaxVersions(MAX_VERSIONS);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  public void testScanner_StopRow1542() throws IOException {
    byte[] family = Bytes.toBytes("testFamily");
    this.region = initHRegion(tableName, method, CONF, family);

    byte[] row1 = Bytes.toBytes("row111");
    byte[] row2 = Bytes.toBytes("row222");
    byte[] row3 = Bytes.toBytes("row333");
    byte[] row4 = Bytes.toBytes("row444");
    byte[] row5 = Bytes.toBytes("row555");

    byte[] col1 = Bytes.toBytes("Pub111");
    byte[] col2 = Bytes.toBytes("Pub222");

    Put put = new Put(row1);
    put.addColumn(family, col1, Bytes.toBytes(10L));

    put = new Put(row2);
    put.addColumn(family, col1, Bytes.toBytes(15L));

    put = new Put(row3);
    put.addColumn(family, col2, Bytes.toBytes(20L));

    put = new Put(row4);
    put.addColumn(family, col2, Bytes.toBytes(30L));

    put = new Put(row5);
    put.addColumn(family, col1, Bytes.toBytes(40L));

    Scan scan = new Scan(row3, row4);
    scan.setMaxVersions();
    scan.addColumn(family, col1);
    InternalScanner s = region.getScanner(scan);

    List<Cell> results = new ArrayList<>();
    assertEquals(false, s.next(results));
    assertEquals(0, results.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
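  // Note on the assertions above: the scan range [row333, row444) is half-open and only "Pub111"
  // was requested, but row333 carries only "Pub222", so the scanner legitimately returns no cells
  // even though a row exists inside the requested range.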
  public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier1");
    byte[] qf2 = Bytes.toBytes("quateslifier2");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);

    // Putting data in Region
    KeyValue kv14 = new KeyValue(row1, fam1, qf1, ts4, KeyValue.Type.Put, null);
    KeyValue kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    KeyValue kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    KeyValue kv24 = new KeyValue(row1, fam1, qf2, ts4, KeyValue.Type.Put, null);
    KeyValue kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    KeyValue kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    KeyValue kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);

    put = new Put(row1);
    put = new Put(row1);
    put = new Put(row1);
    put = new Put(row1);

    List<KeyValue> expected = new ArrayList<>();

    Scan scan = new Scan(row1);
    scan.setMaxVersions(versions);
    List<Cell> actual = new ArrayList<>();
    InternalScanner scanner = region.getScanner(scan);

    boolean hasNext = scanner.next(actual);
    assertEquals(false, hasNext);

    for (int i = 0; i < expected.size(); i++) {
      assertTrue(PrivateCellUtil.equalsIgnoreMvccVersion(expected.get(i), actual.get(i)));
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * Added for HBASE-5416
   *
   * Here we test scan optimization when only a subset of CFs is used in the filter conditions.
   */
  public void testScanner_JoinedScanners() throws IOException {
    byte[] cf_essential = Bytes.toBytes("essential");
    byte[] cf_joined = Bytes.toBytes("joined");
    byte[] cf_alpha = Bytes.toBytes("alpha");
    this.region = initHRegion(tableName, method, CONF, cf_essential, cf_joined, cf_alpha);

    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    byte[] row3 = Bytes.toBytes("row3");

    byte[] col_normal = Bytes.toBytes("d");
    byte[] col_alpha = Bytes.toBytes("a");

    byte[] filtered_val = Bytes.toBytes(3);

    Put put = new Put(row1);
    put.addColumn(cf_essential, col_normal, Bytes.toBytes(1));
    put.addColumn(cf_joined, col_alpha, Bytes.toBytes(1));

    put = new Put(row2);
    put.addColumn(cf_essential, col_alpha, Bytes.toBytes(2));
    put.addColumn(cf_joined, col_normal, Bytes.toBytes(2));
    put.addColumn(cf_alpha, col_alpha, Bytes.toBytes(2));

    put = new Put(row3);
    put.addColumn(cf_essential, col_normal, filtered_val);
    put.addColumn(cf_joined, col_normal, filtered_val);

    // Check two things:
    // 1. result list contains expected values
    // 2. result list is sorted properly

    Scan scan = new Scan();
    Filter filter = new SingleColumnValueExcludeFilter(cf_essential, col_normal,
        CompareOp.NOT_EQUAL, filtered_val);
    scan.setFilter(filter);
    scan.setLoadColumnFamiliesOnDemand(true);
    InternalScanner s = region.getScanner(scan);

    List<Cell> results = new ArrayList<>();
    assertTrue(s.next(results));
    assertEquals(1, results.size());

    assertTrue(s.next(results));
    assertEquals(3, results.size());
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(0), cf_alpha));
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(1), cf_essential));
    assertTrue("orderCheck", CellUtil.matchingFamily(results.get(2), cf_joined));

    assertFalse(s.next(results));
    assertEquals(0, results.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
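  // The scan above enables lazy column family loading: only the "essential" family is read while
  // the filter is evaluated, and the remaining families are fetched just for rows that pass it,
  // so row3 (whose essential value equals filtered_val and is therefore filtered out) never pays
  // for loading its non-essential data.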
  /**
   * Test case when scan limits the amount of KVs returned on each next() call.
   */
  public void testScanner_JoinedScannersWithLimits() throws IOException {
    final byte[] cf_first = Bytes.toBytes("first");
    final byte[] cf_second = Bytes.toBytes("second");

    this.region = initHRegion(tableName, method, CONF, cf_first, cf_second);

    final byte[] col_a = Bytes.toBytes("a");
    final byte[] col_b = Bytes.toBytes("b");

    for (int i = 0; i < 10; i++) {
      put = new Put(Bytes.toBytes("r" + Integer.toString(i)));
      put.addColumn(cf_first, col_a, Bytes.toBytes(i));
      put.addColumn(cf_first, col_b, Bytes.toBytes(i));
      put.addColumn(cf_second, col_a, Bytes.toBytes(i));
      put.addColumn(cf_second, col_b, Bytes.toBytes(i));
    }

    Scan scan = new Scan();
    scan.setLoadColumnFamiliesOnDemand(true);
    Filter bogusFilter = new FilterBase() {
      @Override
      public ReturnCode filterCell(final Cell ignored) throws IOException {
        return ReturnCode.INCLUDE;
      }

      @Override
      public boolean isFamilyEssential(byte[] name) {
        return Bytes.equals(name, cf_first);
      }
    };

    scan.setFilter(bogusFilter);
    InternalScanner s = region.getScanner(scan);

    // Our data looks like this:
    // r0: first:a, first:b, second:a, second:b
    // r1: first:a, first:b, second:a, second:b
    // r2: first:a, first:b, second:a, second:b
    // r3: first:a, first:b, second:a, second:b
    // r4: first:a, first:b, second:a, second:b

    // But due to next's limit set to 3, we should get this:
    // r0: first:a, first:b, second:a
    // r1: first:a, first:b, second:a
    // r2: first:a, first:b, second:a
    // r3: first:a, first:b, second:a
    // r4: first:a, first:b, second:a

    List<Cell> results = new ArrayList<>();

    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build();

    boolean more = s.next(results, scannerContext);
    if ((index >> 1) < 5) {
      if (index % 2 == 0) {
        assertEquals(3, results.size());
      } else {
        assertEquals(1, results.size());
      }
    } else {
      assertEquals(1, results.size());
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
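  // With setBatchLimit(3) each 4-cell row above comes back across two next() calls -- a full
  // batch of three cells followed by a single-cell remainder -- which is exactly what the
  // alternating 3/1 size assertions are checking.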
  /**
   * Write an HFile block full of Cells whose qualifiers are identical between index 0 and
   * Short.MAX_VALUE. See HBASE-13329.
   */
  public void testLongQualifier() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    byte[] q = new byte[Short.MAX_VALUE + 2];
    Arrays.fill(q, 0, q.length - 1, (byte) 42);
    for (byte i = 0; i < 10; i++) {
      Put p = new Put(Bytes.toBytes("row"));
      // qualifiers that differ past Short.MAX_VALUE
      p.addColumn(family, q, q);
    }
    region.flush(false);
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * Flushes the cache in a thread while scanning. The test verifies that the
   * scan is coherent - e.g. the returned results are always of the same or
   * later update as the previous results.
   *
   * @throws IOException
   * @throws InterruptedException
   */
  public void testFlushCacheWhileScanning() throws IOException, InterruptedException {
    byte[] family = Bytes.toBytes("family");

    int flushAndScanInterval = 10;
    int compactInterval = 10 * flushAndScanInterval;

    this.region = initHRegion(tableName, method, CONF, family);
    FlushThread flushThread = new FlushThread();
    flushThread.start();

    Scan scan = new Scan();
    scan.addFamily(family);
    scan.setFilter(new SingleColumnValueFilter(family, qual1, CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes(5L))));

    int expectedCount = 0;
    List<Cell> res = new ArrayList<>();

    boolean toggle = true;
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.setDurability(Durability.SKIP_WAL);
      put.addColumn(family, qual1, Bytes.toBytes(i % 10));

      if (i != 0 && i % compactInterval == 0) {
        LOG.debug("iteration = " + i + " ts=" + System.currentTimeMillis());
        region.compact(true);
      }

      if (i != 0 && i % flushAndScanInterval == 0) {
        InternalScanner scanner = region.getScanner(scan);
        flushThread.flush();
        while (scanner.next(res))
          ;
        flushThread.flush();
        assertEquals("toggle=" + toggle + "i=" + i + " ts=" + System.currentTimeMillis(),
            expectedCount, res.size());
      }
    }

    try {
      flushThread.join();
      flushThread.checkNoError();
    } catch (InterruptedException ie) {
      LOG.warn("Caught exception when joining with flushThread", ie);
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  protected class FlushThread extends Thread {
    private volatile boolean done;
    private Throwable error = null;

    FlushThread() {
      super("FlushThread");
    }

    public void done() {
      done = true;
      synchronized (this) {
        interrupt();
      }
    }

    public void checkNoError() {
      if (error != null) {
        assertNull(error);
      }
    }

    @Override
    public void run() {
      while (!done) {
        synchronized (this) {
          try {
            wait();
          } catch (InterruptedException ignored) {
          }
        }
        try {
          region.flush(true);
        } catch (IOException e) {
          error = e;
          LOG.error("Error while flushing cache", e);
          break;
        } catch (Throwable t) {
          LOG.error("Uncaught exception", t);
          break;
        }
      }
    }

    public void flush() {
      synchronized (this) {
        notify();
      }
    }
  }
  /**
   * Writes very wide records and scans for the latest every time. Flushes and
   * compacts the region every now and then to keep things realistic.
   *
   * @throws IOException
   *           by flush / scan / compaction
   * @throws InterruptedException
   *           when joining threads
   */
  public void testWritesWhileScanning() throws IOException, InterruptedException {
    int testCount = 100;

    int numFamilies = 10;
    int numQualifiers = 100;
    int flushInterval = 7;
    int compactInterval = 5 * flushInterval;
    byte[][] families = new byte[numFamilies][];
    for (int i = 0; i < numFamilies; i++) {
      families[i] = Bytes.toBytes("family" + i);
    }
    byte[][] qualifiers = new byte[numQualifiers][];
    for (int i = 0; i < numQualifiers; i++) {
      qualifiers[i] = Bytes.toBytes("qual" + i);
    }

    this.region = initHRegion(tableName, method, CONF, families);
    FlushThread flushThread = new FlushThread();
    PutThread putThread = new PutThread(numRows, families, qualifiers);
    putThread.start();
    putThread.waitForFirstPut();

    flushThread.start();

    Scan scan = new Scan(Bytes.toBytes("row0"), Bytes.toBytes("row1"));

    int expectedCount = numFamilies * numQualifiers;
    List<Cell> res = new ArrayList<>();

    long prevTimestamp = 0L;
    for (int i = 0; i < testCount; i++) {
      if (i != 0 && i % compactInterval == 0) {
        region.compact(true);
        for (HStore store : region.getStores()) {
          store.closeAndArchiveCompactedFiles();
        }
      }

      if (i != 0 && i % flushInterval == 0) {
        flushThread.flush();
      }

      boolean previousEmpty = res.isEmpty();
      InternalScanner scanner = region.getScanner(scan);
      while (scanner.next(res))
        ;
      if (!res.isEmpty() || !previousEmpty || i > compactInterval) {
        assertEquals("i=" + i, expectedCount, res.size());
        long timestamp = res.get(0).getTimestamp();
        assertTrue("Timestamps were broke: " + timestamp + " prev: " + prevTimestamp,
            timestamp >= prevTimestamp);
        prevTimestamp = timestamp;
      }
    }

    try {
      flushThread.join();
      flushThread.checkNoError();

      putThread.join();
      putThread.checkNoError();
    } catch (InterruptedException ie) {
      LOG.warn("Caught exception when joining with flushThread", ie);
    }

    try {
      HBaseTestingUtility.closeRegionAndWAL(this.region);
    } catch (DroppedSnapshotException dse) {
      // We could get this on way out because we interrupt the background flusher and it could
      // fail anywhere causing a DSE over in the background flusher... only it is not properly
      // dealt with so could still be memory hanging out when we get to here -- memory we can't
      // flush because the accounting is 'off' since original DSE.
    }
  }
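  // The scan loop above checks two invariants while the PutThread and FlushThread race with it:
  // every complete scan of the row returns all numFamilies * numQualifiers cells, and the first
  // cell's timestamp never moves backwards between iterations.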
  protected class PutThread extends Thread {
    private volatile boolean done;
    private volatile int numPutsFinished = 0;

    private Throwable error = null;
    private int numRows;
    private byte[][] families;
    private byte[][] qualifiers;

    private PutThread(int numRows, byte[][] families, byte[][] qualifiers) {
      super("PutThread");
      this.numRows = numRows;
      this.families = families;
      this.qualifiers = qualifiers;
    }

    /**
     * Block calling thread until this instance of PutThread has put at least one row.
     */
    public void waitForFirstPut() throws InterruptedException {
      // wait until put thread actually puts some data
      while (isAlive() && numPutsFinished == 0) {
        checkNoError();
      }
    }

    public void done() {
      done = true;
      synchronized (this) {
        interrupt();
      }
    }

    public void checkNoError() {
      if (error != null) {
        assertNull(error);
      }
    }

    @Override
    public void run() {
      while (!done) {
        try {
          for (int r = 0; r < numRows; r++) {
            byte[] row = Bytes.toBytes("row" + r);
            Put put = new Put(row);
            put.setDurability(Durability.SKIP_WAL);
            byte[] value = Bytes.toBytes(String.valueOf(numPutsFinished));
            for (byte[] family : families) {
              for (byte[] qualifier : qualifiers) {
                put.addColumn(family, qualifier, numPutsFinished, value);
              }
            }
            region.put(put);
            numPutsFinished++;
            if (numPutsFinished > 0 && numPutsFinished % 47 == 0) {
              System.out.println("put iteration = " + numPutsFinished);
              Delete delete = new Delete(row, (long) numPutsFinished - 30);
              region.delete(delete);
            }
          }
        } catch (InterruptedIOException e) {
          // This is fine. It means we are done, or didn't get the lock on time
          LOG.info("Interrupted", e);
        } catch (IOException e) {
          LOG.error("Error while putting records", e);
          error = e;
          break;
        }
      }
    }
  }
  /**
   * Writes very wide records and gets the latest row every time. Flushes and
   * compacts the region aggressively to catch issues.
   *
   * @throws IOException
   *           by flush / scan / compaction
   * @throws InterruptedException
   *           when joining threads
   */
  public void testWritesWhileGetting() throws Exception {
    int numFamilies = 10;
    int numQualifiers = 100;
    int compactInterval = 100;
    byte[][] families = new byte[numFamilies][];
    for (int i = 0; i < numFamilies; i++) {
      families[i] = Bytes.toBytes("family" + i);
    }
    byte[][] qualifiers = new byte[numQualifiers][];
    for (int i = 0; i < numQualifiers; i++) {
      qualifiers[i] = Bytes.toBytes("qual" + i);
    }

    // This test flushes constantly and can cause many files to be created,
    // extending over the ulimit. Make sure compactions are aggressive in
    // reducing the number of HFiles created.
    Configuration conf = HBaseConfiguration.create(CONF);
    conf.setInt("hbase.hstore.compaction.min", 1);
    conf.setInt("hbase.hstore.compaction.max", 1000);
    this.region = initHRegion(tableName, method, conf, families);
    PutThread putThread = null;
    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
    try {
      putThread = new PutThread(numRows, families, qualifiers);
      putThread.start();
      putThread.waitForFirstPut();

      // Add a thread that flushes as fast as possible
      ctx.addThread(new RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          region.flush(true);
          // Compact regularly to avoid creating too many files and exceeding
          // the ulimit.
          region.compact(false);
          for (HStore store : region.getStores()) {
            store.closeAndArchiveCompactedFiles();
          }
        }
      });

      Get get = new Get(Bytes.toBytes("row0"));
      Result result = null;

      int expectedCount = numFamilies * numQualifiers;

      long prevTimestamp = 0L;
      for (int i = 0; i < testCount; i++) {
        LOG.info("testWritesWhileGetting verify turn " + i);
        boolean previousEmpty = result == null || result.isEmpty();
        result = region.get(get);
        if (!result.isEmpty() || !previousEmpty || i > compactInterval) {
          assertEquals("i=" + i, expectedCount, result.size());
          // TODO this was removed, now what dangit?!
          // search looking for the qualifier in question?
          long timestamp = 0;
          for (Cell kv : result.rawCells()) {
            if (CellUtil.matchingFamily(kv, families[0])
                && CellUtil.matchingQualifier(kv, qualifiers[0])) {
              timestamp = kv.getTimestamp();
            }
          }
          assertTrue(timestamp >= prevTimestamp);
          prevTimestamp = timestamp;
          Cell previousKV = null;

          for (Cell kv : result.rawCells()) {
            byte[] thisValue = CellUtil.cloneValue(kv);
            if (previousKV != null) {
              if (Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue) != 0) {
                LOG.warn("These two KV should have the same value." + " Previous KV:" + previousKV
                    + "(memStoreTS:" + previousKV.getSequenceId() + ")" + ", New KV: " + kv
                    + "(memStoreTS:" + kv.getSequenceId() + ")");
                assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue));
              }
            }
            previousKV = kv;
          }
        }
      }
    } finally {
      if (putThread != null)
        putThread.done();

      if (putThread != null) {
        putThread.join();
        putThread.checkNoError();
      }

      HBaseTestingUtility.closeRegionAndWAL(this.region);
    }
  }
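  // The inner value-comparison loop above is the core check: each put writes the same value to
  // every (family, qualifier) pair of the row, so any two cells of a single Result that disagree
  // indicate a torn read across the concurrent flush; the sequence ids are logged to help
  // diagnose which memstore/file snapshot each cell came from.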
  public void testHolesInMeta() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, Bytes.toBytes("x"), Bytes.toBytes("z"), method, CONF,
        false, family);

    byte[] rowNotServed = Bytes.toBytes("a");
    Get g = new Get(rowNotServed);
    try {
      region.get(g);
      fail();
    } catch (WrongRegionException x) {
      // this is expected
    }
    byte[] row = Bytes.toBytes("y");
    g = new Get(row);
    region.get(g);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  public void testIndexesScanWithOneDeletedRow() throws IOException {
    byte[] family = Bytes.toBytes("family");

    // Setting up region
    this.region = initHRegion(tableName, method, CONF, family);

    Put put = new Put(Bytes.toBytes(1L));
    put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));

    Delete delete = new Delete(Bytes.toBytes(1L), 1L);
    region.delete(delete);

    put = new Put(Bytes.toBytes(2L));
    put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));

    Scan idxScan = new Scan();
    idxScan.addFamily(family);
    idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter> asList(
        new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL,
            new BinaryComparator(Bytes.toBytes(0L))), new SingleColumnValueFilter(family, qual1,
            CompareOp.LESS_OR_EQUAL, new BinaryComparator(Bytes.toBytes(3L))))));
    InternalScanner scanner = region.getScanner(idxScan);
    List<Cell> res = new ArrayList<>();

    while (scanner.next(res))
      ;
    assertEquals(1L, res.size());

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  // ////////////////////////////////////////////////////////////////////////////
  // Bloom filter test
  // ////////////////////////////////////////////////////////////////////////////
  public void testBloomFilterSize() throws IOException {
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("col");
    byte[] val1 = Bytes.toBytes("value1");

    HColumnDescriptor hcd = new HColumnDescriptor(fam1).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);

    HTableDescriptor htd = new HTableDescriptor(tableName);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region = TEST_UTIL.createLocalHRegion(info, htd);

    int num_unique_rows = 10;
    int duplicate_multiplier = 2;
    int num_storefiles = 4;

    for (int f = 0; f < num_storefiles; f++) {
      for (int i = 0; i < duplicate_multiplier; i++) {
        for (int j = 0; j < num_unique_rows; j++) {
          Put put = new Put(Bytes.toBytes("row" + j));
          put.setDurability(Durability.SKIP_WAL);
          long ts = version++;
          put.addColumn(fam1, qf1, ts, val1);
        }
      }
    }

    // before compaction
    HStore store = region.getStore(fam1);
    Collection<HStoreFile> storeFiles = store.getStorefiles();
    for (HStoreFile storefile : storeFiles) {
      StoreFileReader reader = storefile.getReader();
      reader.loadFileInfo();
      reader.loadBloomfilter();
      assertEquals(num_unique_rows * duplicate_multiplier, reader.getEntries());
      assertEquals(num_unique_rows, reader.getFilterEntries());
    }

    region.compact(true);

    storeFiles = store.getStorefiles();
    for (HStoreFile storefile : storeFiles) {
      StoreFileReader reader = storefile.getReader();
      reader.loadFileInfo();
      reader.loadBloomfilter();
      assertEquals(num_unique_rows * duplicate_multiplier * num_storefiles, reader.getEntries());
      assertEquals(num_unique_rows, reader.getFilterEntries());
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
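  // Entry-count arithmetic behind the assertions above: each of the 4 store files holds the
  // 10 unique rows written twice (20 cells), so getEntries() is 20 per file before compaction
  // and 20 * 4 = 80 for the single compacted file, while the ROWCOL bloom keeps one entry per
  // unique row+column and therefore stays at 10 throughout.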
  public void testAllColumnsWithBloomFilter() throws IOException {
    byte[] TABLE = Bytes.toBytes(name.getMethodName());
    byte[] FAMILY = Bytes.toBytes("family");

    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region = TEST_UTIL.createLocalHRegion(info, htd);

    // For row:0, col:0: insert versions 1 through 5.
    byte row[] = Bytes.toBytes("row:" + 0);
    byte column[] = Bytes.toBytes("column:" + 0);
    Put put = new Put(row);
    put.setDurability(Durability.SKIP_WAL);
    for (long idx = 1; idx <= 4; idx++) {
      put.addColumn(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx));
    }

    Get get = new Get(row);
    get.setMaxVersions();
    Cell[] kvs = region.get(get).rawCells();

    // Check if rows are correct
    assertEquals(4, kvs.length);
    checkOneCell(kvs[0], FAMILY, 0, 0, 4);
    checkOneCell(kvs[1], FAMILY, 0, 0, 3);
    checkOneCell(kvs[2], FAMILY, 0, 0, 2);
    checkOneCell(kvs[3], FAMILY, 0, 0, 1);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  /**
   * Testcase to cover bug-fix for HBASE-2823. Ensures correct delete when
   * issuing delete row on columns with bloom filter set to row+col
   * (BloomType.ROWCOL)
   */
  public void testDeleteRowWithBloomFilter() throws IOException {
    byte[] familyName = Bytes.toBytes("familyName");

    HColumnDescriptor hcd = new HColumnDescriptor(familyName).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);

    HTableDescriptor htd = new HTableDescriptor(tableName);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region = TEST_UTIL.createLocalHRegion(info, htd);

    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.addColumn(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));

    Delete del = new Delete(row);

    // Get remaining rows (should have none)
    Get get = new Get(row);
    get.addColumn(familyName, col);

    Cell[] keyValues = region.get(get).rawCells();
    assertTrue(keyValues.length == 0);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    // Why do we set the block size in this test? If we set it smaller than the kvs, then we'll
    // break up the file in to more pieces that can be distributed across the three nodes and we
    // won't be able to have the condition this test asserts; that at least one node has
    // a copy of all replicas -- if small block size, then blocks are spread evenly across the
    // the three nodes. hfilev3 with tags seems to put us over the block size. St.Ack.
    // final int DEFAULT_BLOCK_SIZE = 1024;
    // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);

    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;

    try {
      cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
      byte[][] families = { fam1, fam2 };
      Table ht = htu.createTable(tableName, families);

      // Setting up region
      byte row[] = Bytes.toBytes("row1");
      byte col[] = Bytes.toBytes("col1");

      Put put = new Put(row);
      put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
      put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
      firstRegion.flush(true);
      HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

      // Given the default replication factor is 2 and we have 2 HFiles,
      // we will have total of 4 replica of blocks on 3 datanodes; thus there
      // must be at least one host that have replica for 2 HFiles. That host's
      // weight will be equal to the unique block weight.
      long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
      StringBuilder sb = new StringBuilder();
      for (String host : blocksDistribution1.getTopHosts()) {
        if (sb.length() > 0) sb.append(", ");
        sb.append(blocksDistribution1.getWeight(host));
      }

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
          topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
      assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

      // use the static method to compute the value, it should be the same.
      // static method is used by load balancer or other components
      HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
          htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
      long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

      assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);
    } finally {
      if (cluster != null) {
        htu.shutdownMiniCluster();
      }
    }
  }
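  // Note on the second assertion above: HRegion.computeHDFSBlocksDistribution() is the static
  // path used by the load balancer and other components that have no open HRegion instance, so
  // its unique-blocks weight must match what the live region reported for the same files.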
  /**
   * Testcase to check that the state of the region initialization task is set to ABORTED
   * if any exception occurs during initialization
   *
   * @throws Exception
   */
  public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
    try {
      FileSystem fs = Mockito.mock(FileSystem.class);
      Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("cf"));
      info = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY, false);
      Path path = new Path(dir + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
      region = HRegion.newHRegion(path, null, fs, CONF, info, htd, null);
      // region initialization throws IOException and set task state to ABORTED.
      region.initialize();
      fail("Region initialization should fail due to IOException");
    } catch (IOException io) {
      List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
      for (MonitoredTask monitoredTask : tasks) {
        if (!(monitoredTask instanceof MonitoredRPCHandler)
            && monitoredTask.getDescription().contains(region.toString())) {
          assertTrue("Region state should be ABORTED.",
              monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
          break;
        }
      }
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }
  /**
   * Verifies that the .regioninfo file is written on region creation and that
   * it is recreated if missing during region opening.
   */
  public void testRegionInfoFileCreation() throws IOException {
    Path rootDir = new Path(dir + "testRegionInfoFileCreation");

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor("cf"));

    HRegionInfo hri = new HRegionInfo(htd.getTableName());

    // Create a region and skip the initialization (like CreateTableHandler)
    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootDir, CONF, htd, false);
    Path regionDir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    HBaseTestingUtility.closeRegionAndWAL(region);

    Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);

    // Verify that the .regioninfo file is present
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(regionInfoFile));

    // Try to open the region
    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HBaseTestingUtility.closeRegionAndWAL(region);

    // Verify that the .regioninfo file is still there
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(regionInfoFile));

    // Remove the .regioninfo file and verify it is recreated on region open
    fs.delete(regionInfoFile, true);
    assertFalse(HRegionFileSystem.REGION_INFO_FILE + " should be removed from the region dir",
        fs.exists(regionInfoFile));

    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
    // region = TEST_UTIL.openHRegion(hri, htd);
    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HBaseTestingUtility.closeRegionAndWAL(region);

    // Verify that the .regioninfo file is still there
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE)));
  }
  /**
   * TestCase for increment
   */
  private static class Incrementer implements Runnable {
    private HRegion region;
    private final static byte[] incRow = Bytes.toBytes("incRow");
    private final static byte[] family = Bytes.toBytes("family");
    private final static byte[] qualifier = Bytes.toBytes("qualifier");
    private final static long ONE = 1L;
    private int incCounter;

    public Incrementer(HRegion region, int incCounter) {
      this.region = region;
      this.incCounter = incCounter;
    }

    @Override
    public void run() {
      int count = 0;
      while (count < incCounter) {
        Increment inc = new Increment(incRow);
        inc.addColumn(family, qualifier, ONE);
        count++;
        try {
          region.increment(inc);
        } catch (IOException e) {
          LOG.info("Count=" + count + ", " + e);
          break;
        }
      }
    }
  }
  /**
   * Test case to check increment function with memstore flushing
   */
  public void testParallelIncrementWithMemStoreFlush() throws Exception {
    byte[] family = Incrementer.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean incrementDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
      @Override
      public void run() {
        while (!incrementDone.get()) {
          try {
            region.flush(true);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    };

    // after all increment finished, the row will increment to 20*100 = 2000
    int incCounter = 100;
    long expected = (long) threadNum * incCounter;
    Thread[] incrementers = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
      incrementers[i] = new Thread(new Incrementer(this.region, incCounter));
      incrementers[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
      incrementers[i].join();
    }

    incrementDone.set(true);

    Get get = new Get(Incrementer.incRow);
    get.addColumn(Incrementer.family, Incrementer.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Incrementer.family, Incrementer.qualifier);

    // we just got the latest version
    assertEquals(1, kvs.size());
    Cell kv = kvs.get(0);
    assertEquals(expected, Bytes.toLong(kv.getValueArray(), kv.getValueOffset()));
  }
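  // The incrementer threads (20 of them per the comment above) each apply incCounter
  // single-column increments while the flusher thread flushes the memstore in a loop, so the
  // final value must equal threadNum * incCounter regardless of how increments interleave with
  // flushes.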
  /**
   * TestCase for append
   */
  private static class Appender implements Runnable {
    private HRegion region;
    private final static byte[] appendRow = Bytes.toBytes("appendRow");
    private final static byte[] family = Bytes.toBytes("family");
    private final static byte[] qualifier = Bytes.toBytes("qualifier");
    private final static byte[] CHAR = Bytes.toBytes("a");
    private int appendCounter;

    public Appender(HRegion region, int appendCounter) {
      this.region = region;
      this.appendCounter = appendCounter;
    }

    @Override
    public void run() {
      int count = 0;
      while (count < appendCounter) {
        Append app = new Append(appendRow);
        app.addColumn(family, qualifier, CHAR);
        count++;
        try {
          region.append(app);
        } catch (IOException e) {
          LOG.info("Count=" + count + ", max=" + appendCounter + ", " + e);
          break;
        }
      }
    }
  }
  /**
   * Test case to check append function with memstore flushing
   */
  public void testParallelAppendWithMemStoreFlush() throws Exception {
    byte[] family = Appender.family;
    this.region = initHRegion(tableName, method, CONF, family);
    final HRegion region = this.region;
    final AtomicBoolean appendDone = new AtomicBoolean(false);
    Runnable flusher = new Runnable() {
      @Override
      public void run() {
        while (!appendDone.get()) {
          try {
            region.flush(true);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    };

    // After all append finished, the value will append to threadNum *
    // appendCounter Appender.CHAR
    int appendCounter = 100;
    byte[] expected = new byte[threadNum * appendCounter];
    for (int i = 0; i < threadNum * appendCounter; i++) {
      System.arraycopy(Appender.CHAR, 0, expected, i, 1);
    }
    Thread[] appenders = new Thread[threadNum];
    Thread flushThread = new Thread(flusher);
    for (int i = 0; i < threadNum; i++) {
      appenders[i] = new Thread(new Appender(this.region, appendCounter));
      appenders[i].start();
    }
    flushThread.start();
    for (int i = 0; i < threadNum; i++) {
      appenders[i].join();
    }

    appendDone.set(true);

    Get get = new Get(Appender.appendRow);
    get.addColumn(Appender.family, Appender.qualifier);
    get.setMaxVersions(1);
    Result res = this.region.get(get);
    List<Cell> kvs = res.getColumnCells(Appender.family, Appender.qualifier);

    // we just got the latest version
    assertEquals(1, kvs.size());
    Cell kv = kvs.get(0);
    byte[] appendResult = new byte[kv.getValueLength()];
    System.arraycopy(kv.getValueArray(), kv.getValueOffset(), appendResult, 0, kv.getValueLength());
    assertArrayEquals(expected, appendResult);
  }
  /**
   * Test case to check put function with memstore flushing for same row, same ts
   */
  public void testPutWithMemStoreFlush() throws Exception {
    byte[] family = Bytes.toBytes("family");
    byte[] qualifier = Bytes.toBytes("qualifier");
    byte[] row = Bytes.toBytes("putRow");
    byte[] value = null;
    this.region = initHRegion(tableName, method, CONF, family);
    Put put = null;
    Get get = null;
    List<Cell> kvs = null;
    Result res = null;

    put = new Put(row);
    value = Bytes.toBytes("value0");
    put.addColumn(family, qualifier, 1234567L, value);
    region.put(put);

    get = new Get(row);
    get.addColumn(family, qualifier);
    get.setMaxVersions();
    res = this.region.get(get);
    kvs = res.getColumnCells(family, qualifier);
    assertEquals(1, kvs.size());
    assertArrayEquals(Bytes.toBytes("value0"), CellUtil.cloneValue(kvs.get(0)));

    region.flush(true);
    get = new Get(row);
    get.addColumn(family, qualifier);
    get.setMaxVersions();
    res = this.region.get(get);
    kvs = res.getColumnCells(family, qualifier);
    assertEquals(1, kvs.size());
    assertArrayEquals(Bytes.toBytes("value0"), CellUtil.cloneValue(kvs.get(0)));

    put = new Put(row);
    value = Bytes.toBytes("value1");
    put.addColumn(family, qualifier, 1234567L, value);
    region.put(put);

    get = new Get(row);
    get.addColumn(family, qualifier);
    get.setMaxVersions();
    res = this.region.get(get);
    kvs = res.getColumnCells(family, qualifier);
    assertEquals(1, kvs.size());
    assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0)));

    region.flush(true);
    get = new Get(row);
    get.addColumn(family, qualifier);
    get.setMaxVersions();
    res = this.region.get(get);
    kvs = res.getColumnCells(family, qualifier);
    assertEquals(1, kvs.size());
    assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0)));
  }
  public void testDurability() throws Exception {
    // there are 5 x 5 cases:
    // table durability(SYNC,FSYNC,ASYNC,SKIP,USE_DEFAULT) x mutation
    // durability(SYNC,FSYNC,ASYNC,SKIP,USE_DEFAULT)

    // expected cases for append and sync wal
    durabilityTest(method, Durability.SYNC_WAL, Durability.SYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.SYNC_WAL, Durability.FSYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.SYNC_WAL, Durability.USE_DEFAULT, 0, true, true, false);

    durabilityTest(method, Durability.FSYNC_WAL, Durability.SYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.FSYNC_WAL, Durability.FSYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.FSYNC_WAL, Durability.USE_DEFAULT, 0, true, true, false);

    durabilityTest(method, Durability.ASYNC_WAL, Durability.SYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.FSYNC_WAL, 0, true, true, false);

    durabilityTest(method, Durability.SKIP_WAL, Durability.SYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.SKIP_WAL, Durability.FSYNC_WAL, 0, true, true, false);

    durabilityTest(method, Durability.USE_DEFAULT, Durability.SYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.USE_DEFAULT, Durability.FSYNC_WAL, 0, true, true, false);
    durabilityTest(method, Durability.USE_DEFAULT, Durability.USE_DEFAULT, 0, true, true, false);

    // expected cases for async wal
    durabilityTest(method, Durability.SYNC_WAL, Durability.ASYNC_WAL, 0, true, false, false);
    durabilityTest(method, Durability.FSYNC_WAL, Durability.ASYNC_WAL, 0, true, false, false);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.ASYNC_WAL, 0, true, false, false);
    durabilityTest(method, Durability.SKIP_WAL, Durability.ASYNC_WAL, 0, true, false, false);
    durabilityTest(method, Durability.USE_DEFAULT, Durability.ASYNC_WAL, 0, true, false, false);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.USE_DEFAULT, 0, true, false, false);

    durabilityTest(method, Durability.SYNC_WAL, Durability.ASYNC_WAL, 5000, true, false, true);
    durabilityTest(method, Durability.FSYNC_WAL, Durability.ASYNC_WAL, 5000, true, false, true);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.ASYNC_WAL, 5000, true, false, true);
    durabilityTest(method, Durability.SKIP_WAL, Durability.ASYNC_WAL, 5000, true, false, true);
    durabilityTest(method, Durability.USE_DEFAULT, Durability.ASYNC_WAL, 5000, true, false, true);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.USE_DEFAULT, 5000, true, false, true);

    // expect skip wal cases
    durabilityTest(method, Durability.SYNC_WAL, Durability.SKIP_WAL, 0, false, false, false);
    durabilityTest(method, Durability.FSYNC_WAL, Durability.SKIP_WAL, 0, false, false, false);
    durabilityTest(method, Durability.ASYNC_WAL, Durability.SKIP_WAL, 0, false, false, false);
    durabilityTest(method, Durability.SKIP_WAL, Durability.SKIP_WAL, 0, false, false, false);
    durabilityTest(method, Durability.USE_DEFAULT, Durability.SKIP_WAL, 0, false, false, false);
    durabilityTest(method, Durability.SKIP_WAL, Durability.USE_DEFAULT, 0, false, false, false);
  }
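  // The matrix above encodes the effective-durability rule exercised by durabilityTest(): the
  // mutation's own durability wins unless it is USE_DEFAULT, in which case the table durability
  // applies; anything resolving to SKIP_WAL expects no WAL append, SYNC/FSYNC expect an immediate
  // sync from the region, and ASYNC_WAL expects the sync to come from the log syncer within the
  // given timeout.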
  private void durabilityTest(String method, Durability tableDurability,
      Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync,
      final boolean expectSyncFromLogSyncer) throws Exception {
    Configuration conf = HBaseConfiguration.create(CONF);
    method = method + "_" + tableDurability.name() + "_" + mutationDurability.name();
    byte[] family = Bytes.toBytes("family");
    Path logDir = new Path(new Path(dir + method), "log");
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, logDir);
    // XXX: The spied AsyncFSWAL can not work properly because of a Mockito defect that can not
    // deal with classes which have a field of an inner class. See discussions in HBASE-15536.
    walConf.set(WALFactory.WAL_PROVIDER, "filesystem");
    final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
    final WAL wal = spy(wals.getWAL(RegionInfoBuilder.newBuilder(tableName).build()));
    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, tableDurability, wal,
        new byte[][] { family });

    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    put.setDurability(mutationDurability);
    region.put(put);

    // verify append called or not
    verify(wal, expectAppend ? times(1) : never())
        .append((HRegionInfo) any(), (WALKeyImpl) any(),
            (WALEdit) any(), Mockito.anyBoolean());

    // verify sync called or not
    if (expectSync || expectSyncFromLogSyncer) {
      TEST_UTIL.waitFor(timeout, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          try {
            if (expectSync) {
              verify(wal, times(1)).sync(anyLong()); // Hregion calls this one
            } else if (expectSyncFromLogSyncer) {
              verify(wal, times(1)).sync(); // wal syncer calls this one
            }
          } catch (Throwable ignore) {
          }
          return true;
        }
      });
    } else {
      // verify(wal, never()).sync(anyLong());
      verify(wal, never()).sync();
    }

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
  public void testRegionReplicaSecondary() throws IOException {
    // create a primary region, load some data and flush
    // create a secondary region, and do a get against that
    Path rootDir = new Path(dir + name.getMethodName());
    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);

    byte[][] families = new byte[][] {
        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
    };
    byte[] cq = Bytes.toBytes("cq");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }

    long time = System.currentTimeMillis();
    // replica ids 0 (primary) and 1 (secondary)
    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 0);
    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 1);

    HRegion primaryRegion = null, secondaryRegion = null;

    try {
      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
          rootDir, TEST_UTIL.getConfiguration(), htd);

      putData(primaryRegion, 0, 1000, cq, families);

      primaryRegion.flush(true);

      // open secondary region
      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);

      verifyData(secondaryRegion, 0, 1000, cq, families);
    } finally {
      if (primaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
      }
      if (secondaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
      }
    }
  }
  public void testRegionReplicaSecondaryIsReadOnly() throws IOException {
    // create a primary region, load some data and flush
    // create a secondary region, and do a put against that
    Path rootDir = new Path(dir + name.getMethodName());
    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);

    byte[][] families = new byte[][] {
        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
    };
    byte[] cq = Bytes.toBytes("cq");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }

    long time = System.currentTimeMillis();
    // replica ids 0 (primary) and 1 (secondary)
    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 0);
    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 1);

    HRegion primaryRegion = null, secondaryRegion = null;

    try {
      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
          rootDir, TEST_UTIL.getConfiguration(), htd);

      putData(primaryRegion, 0, 1000, cq, families);

      primaryRegion.flush(true);

      // open secondary region
      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);

      try {
        putData(secondaryRegion, 0, 1000, cq, families);
        fail("Should have thrown exception");
      } catch (IOException ex) {
        // expected
      }
    } finally {
      if (primaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
      }
      if (secondaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
      }
    }
  }
  static WALFactory createWALFactory(Configuration conf, Path rootDir) throws IOException {
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    return new WALFactory(confForWAL,
        Collections.<WALActionsListener> singletonList(new MetricsWAL()),
        "hregion-" + RandomStringUtils.randomNumeric(8));
  }
  public void testCompactionFromPrimary() throws IOException {
    Path rootDir = new Path(dir + name.getMethodName());
    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);

    byte[][] families = new byte[][] {
        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
    };
    byte[] cq = Bytes.toBytes("cq");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }

    long time = System.currentTimeMillis();
    // replica ids 0 (primary) and 1 (secondary)
    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 0);
    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
        false, time, 1);

    HRegion primaryRegion = null, secondaryRegion = null;

    try {
      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
          rootDir, TEST_UTIL.getConfiguration(), htd);

      putData(primaryRegion, 0, 1000, cq, families);

      primaryRegion.flush(true);

      // open secondary region
      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);

      // move the file of the primary region to the archive, simulating a compaction
      Collection<HStoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
      primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
          .getStoreFiles(families[0]);
      Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty());

      verifyData(secondaryRegion, 0, 1000, cq, families);
    } finally {
      if (primaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
      }
      if (secondaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
      }
    }
  }
  private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws
      IOException {
    putData(this.region, startRow, numRows, qf, families);
  }

  private void putData(HRegion region,
      int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
    putData(region, Durability.SKIP_WAL, startRow, numRows, qf, families);
  }

  static void putData(HRegion region, Durability durability,
      int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      Put put = new Put(Bytes.toBytes("" + i));
      put.setDurability(durability);
      for (byte[] family : families) {
        put.addColumn(family, qf, null);
      }
      LOG.info(put.toString());
      region.put(put);
    }
  }
  static void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      byte[] row = Bytes.toBytes("" + i);
      Get get = new Get(row);
      for (byte[] family : families) {
        get.addColumn(family, qf);
      }
      Result result = newReg.get(get);
      Cell[] raw = result.rawCells();
      assertEquals(families.length, result.size());
      for (int j = 0; j < families.length; j++) {
        assertTrue(CellUtil.matchingRows(raw[j], row));
        assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
        assertTrue(CellUtil.matchingQualifier(raw[j], qf));
      }
    }
  }
  static void assertGet(final HRegion r, final byte[] family, final byte[] k) throws IOException {
    // Now I have k, get values out and assert they are as expected.
    Get get = new Get(k).addFamily(family).setMaxVersions();
    Cell[] results = r.get(get).rawCells();
    for (int j = 0; j < results.length; j++) {
      byte[] tmp = CellUtil.cloneValue(results[j]);
      // Row should be equal to value every time.
      assertTrue(Bytes.equals(k, tmp));
    }
  }
  /*
   * Assert first value in the passed region is <code>firstValue</code>.
   *
   * @throws IOException
   */
  protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue)
      throws IOException {
    byte[][] families = { fs };
    Scan scan = new Scan();
    for (int i = 0; i < families.length; i++)
      scan.addFamily(families[i]);
    InternalScanner s = r.getScanner(scan);

    List<Cell> curVals = new ArrayList<>();
    boolean first = true;
    OUTER_LOOP: while (s.next(curVals)) {
      for (Cell kv : curVals) {
        byte[] val = CellUtil.cloneValue(kv);
        byte[] curval = val;
        if (first) {
          first = false;
          assertTrue(Bytes.compareTo(curval, firstValue) == 0);
        } else {
          // Not asserting anything. Might as well break.
          break OUTER_LOOP;
        }
      }
    }
  }
  /**
   * Test that we get the expected flush results back
   */
  public void testFlushResult() throws IOException {
    byte[] family = Bytes.toBytes("family");

    this.region = initHRegion(tableName, method, family);

    // empty memstore, flush doesn't run
    HRegion.FlushResult fr = region.flush(true);
    assertFalse(fr.isFlushSucceeded());
    assertFalse(fr.isCompactionNeeded());

    // Flush enough files to get up to the threshold, doesn't need compactions
    for (int i = 0; i < 2; i++) {
      Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
      region.put(put);
      fr = region.flush(true);
      assertTrue(fr.isFlushSucceeded());
      assertFalse(fr.isCompactionNeeded());
    }

    // Two flushes after the threshold, compactions are needed
    for (int i = 0; i < 2; i++) {
      Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
      region.put(put);
      fr = region.flush(true);
      assertTrue(fr.isFlushSucceeded());
      assertTrue(fr.isCompactionNeeded());
    }
  }
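  // The flush-result assertions above track the store file count: an empty-memstore flush reports
  // neither success nor a needed compaction, the first couple of successful flushes stay within
  // the store's compaction threshold, and once the accumulated files exceed it every further
  // flush result reports isCompactionNeeded() == true.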
  protected Configuration initSplit() {
    // Always compact if there is more than one store file.
    CONF.setInt("hbase.hstore.compactionThreshold", 2);

    CONF.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);

    // Increase the amount of time between client retries
    CONF.setLong("hbase.client.pause", 15 * 1000);

    // This size should make it so we always split using the addContent
    // below. After adding all data, the first region is 1.3M
    CONF.setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128);
    return CONF;
  }
  /**
   * @return A region on which you must call
   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
   */
  protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
      byte[]... families) throws IOException {
    return initHRegion(tableName, callingMethod, conf, false, families);
  }

  /**
   * @return A region on which you must call
   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
   */
  protected HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
      boolean isReadOnly, byte[]... families) throws IOException {
    return initHRegion(tableName, null, null, callingMethod, conf, isReadOnly, families);
  }

  protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
      String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families)
      throws IOException {
    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
    HRegionInfo hri = new HRegionInfo(tableName, startKey, stopKey);
    final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri);
    return initHRegion(tableName, startKey, stopKey, isReadOnly,
        Durability.SYNC_WAL, wal, families);
  }

  /**
   * @return A region on which you must call
   *         {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
   */
  public HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
      boolean isReadOnly, Durability durability, WAL wal, byte[]... families) throws IOException {
    return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey,
        isReadOnly, durability, wal, families);
  }
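  // Typical usage of these helpers in the tests above (a minimal sketch; the family name is
  // illustrative):
  //
  //   this.region = initHRegion(tableName, method, CONF, Bytes.toBytes("cf"));
  //   try {
  //     // ... puts, scans and assertions against this.region ...
  //   } finally {
  //     HBaseTestingUtility.closeRegionAndWAL(this.region);
  //   }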
  /**
   * Assert that the passed in Cell has expected contents for the specified row,
   * column & timestamp.
   */
  private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) {
    String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts;
    assertEquals("Row mismatch while checking: " + ctx, "row:" + rowIdx,
        Bytes.toString(CellUtil.cloneRow(kv)));
    assertEquals("ColumnFamily mismatch while checking: " + ctx, Bytes.toString(cf),
        Bytes.toString(CellUtil.cloneFamily(kv)));
    assertEquals("Column qualifier mismatch while checking: " + ctx, "column:" + colIdx,
        Bytes.toString(CellUtil.cloneQualifier(kv)));
    assertEquals("Timestamp mismatch while checking: " + ctx, ts, kv.getTimestamp());
    assertEquals("Value mismatch while checking: " + ctx, "value-version-" + ts,
        Bytes.toString(CellUtil.cloneValue(kv)));
  }
  public void testReverseScanner_FromMemStore_SingleCF_Normal()
      throws IOException {
    byte[] rowC = Bytes.toBytes("rowC");
    byte[] rowA = Bytes.toBytes("rowA");
    byte[] rowB = Bytes.toBytes("rowB");
    byte[] cf = Bytes.toBytes("CF");
    byte[][] families = { cf };
    byte[] col = Bytes.toBytes("C");

    this.region = initHRegion(tableName, method, families);

    KeyValue kv1 = new KeyValue(rowC, cf, col, ts, KeyValue.Type.Put, null);
    KeyValue kv11 = new KeyValue(rowC, cf, col, ts + 1, KeyValue.Type.Put,
        null);
    KeyValue kv2 = new KeyValue(rowA, cf, col, ts, KeyValue.Type.Put, null);
    KeyValue kv3 = new KeyValue(rowB, cf, col, ts, KeyValue.Type.Put, null);

    put = new Put(rowC);
    put = new Put(rowA);
    put = new Put(rowB);

    Scan scan = new Scan(rowC);
    scan.setMaxVersions(5);
    scan.setReversed(true);
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> currRow = new ArrayList<>();
    boolean hasNext = scanner.next(currRow);
    assertEquals(2, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow
        .get(0).getRowLength(), rowC, 0, rowC.length));
    assertTrue(hasNext);

    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow
        .get(0).getRowLength(), rowB, 0, rowB.length));
    assertTrue(hasNext);

    hasNext = scanner.next(currRow);
    assertEquals(1, currRow.size());
    assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(), currRow
        .get(0).getRowLength(), rowA, 0, rowA.length));
    assertFalse(hasNext);

    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
5163 public void testReverseScanner_FromMemStore_SingleCF_LargerKey()
5164 throws IOException
{
5165 byte[] rowC
= Bytes
.toBytes("rowC");
5166 byte[] rowA
= Bytes
.toBytes("rowA");
5167 byte[] rowB
= Bytes
.toBytes("rowB");
5168 byte[] rowD
= Bytes
.toBytes("rowD");
5169 byte[] cf
= Bytes
.toBytes("CF");
5170 byte[][] families
= { cf
};
5171 byte[] col
= Bytes
.toBytes("C");
5173 this.region
= initHRegion(tableName
, method
, families
);
5175 KeyValue kv1
= new KeyValue(rowC
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5176 KeyValue kv11
= new KeyValue(rowC
, cf
, col
, ts
+ 1, KeyValue
.Type
.Put
,
5178 KeyValue kv2
= new KeyValue(rowA
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5179 KeyValue kv3
= new KeyValue(rowB
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5181 put
= new Put(rowC
);
5185 put
= new Put(rowA
);
5188 put
= new Put(rowB
);
5192 Scan scan
= new Scan(rowD
);
5193 List
<Cell
> currRow
= new ArrayList
<>();
5194 scan
.setReversed(true);
5195 scan
.setMaxVersions(5);
5196 InternalScanner scanner
= region
.getScanner(scan
);
5197 boolean hasNext
= scanner
.next(currRow
);
5198 assertEquals(2, currRow
.size());
5199 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5200 .get(0).getRowLength(), rowC
, 0, rowC
.length
));
5201 assertTrue(hasNext
);
5203 hasNext
= scanner
.next(currRow
);
5204 assertEquals(1, currRow
.size());
5205 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5206 .get(0).getRowLength(), rowB
, 0, rowB
.length
));
5207 assertTrue(hasNext
);
5209 hasNext
= scanner
.next(currRow
);
5210 assertEquals(1, currRow
.size());
5211 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5212 .get(0).getRowLength(), rowA
, 0, rowA
.length
));
5213 assertFalse(hasNext
);
5216 HBaseTestingUtility
.closeRegionAndWAL(this.region
);
5222 public void testReverseScanner_FromMemStore_SingleCF_FullScan()
5223 throws IOException
{
5224 byte[] rowC
= Bytes
.toBytes("rowC");
5225 byte[] rowA
= Bytes
.toBytes("rowA");
5226 byte[] rowB
= Bytes
.toBytes("rowB");
5227 byte[] cf
= Bytes
.toBytes("CF");
5228 byte[][] families
= { cf
};
5229 byte[] col
= Bytes
.toBytes("C");
5231 this.region
= initHRegion(tableName
, method
, families
);
5233 KeyValue kv1
= new KeyValue(rowC
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5234 KeyValue kv11
= new KeyValue(rowC
, cf
, col
, ts
+ 1, KeyValue
.Type
.Put
,
5236 KeyValue kv2
= new KeyValue(rowA
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5237 KeyValue kv3
= new KeyValue(rowB
, cf
, col
, ts
, KeyValue
.Type
.Put
, null);
5239 put
= new Put(rowC
);
5243 put
= new Put(rowA
);
5246 put
= new Put(rowB
);
5249 Scan scan
= new Scan();
5250 List
<Cell
> currRow
= new ArrayList
<>();
5251 scan
.setReversed(true);
5252 InternalScanner scanner
= region
.getScanner(scan
);
5253 boolean hasNext
= scanner
.next(currRow
);
5254 assertEquals(1, currRow
.size());
5255 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5256 .get(0).getRowLength(), rowC
, 0, rowC
.length
));
5257 assertTrue(hasNext
);
5259 hasNext
= scanner
.next(currRow
);
5260 assertEquals(1, currRow
.size());
5261 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5262 .get(0).getRowLength(), rowB
, 0, rowB
.length
));
5263 assertTrue(hasNext
);
5265 hasNext
= scanner
.next(currRow
);
5266 assertEquals(1, currRow
.size());
5267 assertTrue(Bytes
.equals(currRow
.get(0).getRowArray(), currRow
.get(0).getRowOffset(), currRow
5268 .get(0).getRowLength(), rowA
, 0, rowA
.length
));
5269 assertFalse(hasNext
);
5272 HBaseTestingUtility
.closeRegionAndWAL(this.region
);
public void testReverseScanner_moreRowsMayExistAfter() throws IOException {
  // case for "INCLUDE_AND_SEEK_NEXT_ROW & SEEK_NEXT_ROW" endless loop
  byte[] rowA = Bytes.toBytes("rowA");
  byte[] rowB = Bytes.toBytes("rowB");
  byte[] rowC = Bytes.toBytes("rowC");
  byte[] rowD = Bytes.toBytes("rowD");
  byte[] rowE = Bytes.toBytes("rowE");
  byte[] cf = Bytes.toBytes("CF");
  byte[][] families = { cf };
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  this.region = initHRegion(tableName, method, families);
  KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv4_1 = new KeyValue(rowD, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv4_2 = new KeyValue(rowD, cf, col2, ts, KeyValue.Type.Put, null);
  KeyValue kv5 = new KeyValue(rowE, cf, col1, ts, KeyValue.Type.Put, null);
  put = new Put(rowA);
  put = new Put(rowB);
  put = new Put(rowC);
  put = new Put(rowD);
  put = new Put(rowD);
  put = new Put(rowE);
  Scan scan = new Scan(rowD, rowA);
  scan.addColumn(families[0], col1);
  scan.setReversed(true);
  List<Cell> currRow = new ArrayList<>();
  InternalScanner scanner = region.getScanner(scan);
  boolean hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowD, 0, rowD.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowC, 0, rowC.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowB, 0, rowB.length));
  assertFalse(hasNext);
  scan = new Scan(rowD, rowA);
  scan.addColumn(families[0], col2);
  scan.setReversed(true);
  scanner = region.getScanner(scan);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowD, 0, rowD.length));
  HBaseTestingUtility.closeRegionAndWAL(this.region);
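// Note on the second scan above: only rowD carries col2, so the reverse scanner must skip
// every other row backwards without bouncing between INCLUDE_AND_SEEK_NEXT_ROW and
// SEEK_NEXT_ROW forever -- the endless-loop case called out at the top of this test.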
public void testReverseScanner_smaller_blocksize() throws IOException {
  // case to ensure no conflict with HFile index optimization
  byte[] rowA = Bytes.toBytes("rowA");
  byte[] rowB = Bytes.toBytes("rowB");
  byte[] rowC = Bytes.toBytes("rowC");
  byte[] rowD = Bytes.toBytes("rowD");
  byte[] rowE = Bytes.toBytes("rowE");
  byte[] cf = Bytes.toBytes("CF");
  byte[][] families = { cf };
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  HBaseConfiguration config = new HBaseConfiguration();
  config.setInt("test.block.size", 1);
  this.region = initHRegion(tableName, method, config, families);
  KeyValue kv1 = new KeyValue(rowA, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv2 = new KeyValue(rowB, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv3 = new KeyValue(rowC, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv4_1 = new KeyValue(rowD, cf, col1, ts, KeyValue.Type.Put, null);
  KeyValue kv4_2 = new KeyValue(rowD, cf, col2, ts, KeyValue.Type.Put, null);
  KeyValue kv5 = new KeyValue(rowE, cf, col1, ts, KeyValue.Type.Put, null);
  put = new Put(rowA);
  put = new Put(rowB);
  put = new Put(rowC);
  put = new Put(rowD);
  put = new Put(rowD);
  put = new Put(rowE);
  Scan scan = new Scan(rowD, rowA);
  scan.addColumn(families[0], col1);
  scan.setReversed(true);
  List<Cell> currRow = new ArrayList<>();
  InternalScanner scanner = region.getScanner(scan);
  boolean hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowD, 0, rowD.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowC, 0, rowC.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowB, 0, rowB.length));
  assertFalse(hasNext);
  scan = new Scan(rowD, rowA);
  scan.addColumn(families[0], col2);
  scan.setReversed(true);
  scanner = region.getScanner(scan);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), rowD, 0, rowD.length));
  HBaseTestingUtility.closeRegionAndWAL(this.region);
public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs1() throws IOException {
  byte[] row0 = Bytes.toBytes("row0"); // 1 kv
  byte[] row1 = Bytes.toBytes("row1"); // 2 kv
  byte[] row2 = Bytes.toBytes("row2"); // 4 kv
  byte[] row3 = Bytes.toBytes("row3"); // 2 kv
  byte[] row4 = Bytes.toBytes("row4"); // 5 kv
  byte[] row5 = Bytes.toBytes("row5"); // 2 kv
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[] cf2 = Bytes.toBytes("CF2");
  byte[] cf3 = Bytes.toBytes("CF3");
  byte[][] families = { cf1, cf2, cf3 };
  byte[] col = Bytes.toBytes("C");
  HBaseConfiguration conf = new HBaseConfiguration();
  // disable compactions in this test.
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  this.region = initHRegion(tableName, method, conf, families);
  // kv naming style: kv(row number) totalKvCountInThisRow seq no
  KeyValue kv0_1_1 = new KeyValue(row0, cf1, col, ts, KeyValue.Type.Put, null);
  KeyValue kv1_2_1 = new KeyValue(row1, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv1_2_2 = new KeyValue(row1, cf1, col, ts + 1, KeyValue.Type.Put, null);
  KeyValue kv2_4_1 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv2_4_2 = new KeyValue(row2, cf1, col, ts, KeyValue.Type.Put, null);
  KeyValue kv2_4_3 = new KeyValue(row2, cf3, col, ts, KeyValue.Type.Put, null);
  KeyValue kv2_4_4 = new KeyValue(row2, cf1, col, ts + 4, KeyValue.Type.Put, null);
  KeyValue kv3_2_1 = new KeyValue(row3, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv3_2_2 = new KeyValue(row3, cf1, col, ts + 4, KeyValue.Type.Put, null);
  KeyValue kv4_5_1 = new KeyValue(row4, cf1, col, ts, KeyValue.Type.Put, null);
  KeyValue kv4_5_2 = new KeyValue(row4, cf3, col, ts, KeyValue.Type.Put, null);
  KeyValue kv4_5_3 = new KeyValue(row4, cf3, col, ts + 5, KeyValue.Type.Put, null);
  KeyValue kv4_5_4 = new KeyValue(row4, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv4_5_5 = new KeyValue(row4, cf1, col, ts + 3, KeyValue.Type.Put, null);
  KeyValue kv5_2_1 = new KeyValue(row5, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv5_2_2 = new KeyValue(row5, cf3, col, ts, KeyValue.Type.Put, null);
  // hfiles(cf1/cf2) :"row1"(1 kv) / "row2"(1 kv) / "row4"(2 kv)
  put = new Put(row1);
  put = new Put(row2);
  put = new Put(row4);
  // hfiles(cf1/cf3) : "row1" (1 kvs) / "row2" (1 kv) / "row4" (2 kv)
  put = new Put(row4);
  put = new Put(row1);
  put = new Put(row2);
  // hfiles(cf1/cf3) : "row2"(2 kv) / "row3"(1 kvs) / "row4" (1 kv)
  put = new Put(row4);
  put = new Put(row2);
  put = new Put(row3);
  // memstore(cf1/cf2/cf3) : "row0" (1 kvs) / "row3" ( 1 kv) / "row5" (max)
  put = new Put(row0);
  put = new Put(row3);
  put = new Put(row5);
  // scan range = ["row4", min), skip the max "row5"
  Scan scan = new Scan(row4);
  scan.setMaxVersions(5);
  scan.setReversed(true);
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> currRow = new ArrayList<>();
  boolean hasNext = false;
  // 1. scan out "row4" (5 kvs), "row5" can't be scanned out since not
  // included in scan range
  // "row4" takes 2 next() calls since batch=3
  hasNext = scanner.next(currRow);
  assertEquals(3, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row4, 0, row4.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(2, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row4, 0, row4.length));
  assertTrue(hasNext);
  // 2. scan out "row3" (2 kv)
  hasNext = scanner.next(currRow);
  assertEquals(2, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row3, 0, row3.length));
  assertTrue(hasNext);
  // 3. scan out "row2" (4 kvs)
  // "row2" takes 2 next() calls since batch=3
  hasNext = scanner.next(currRow);
  assertEquals(3, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row2, 0, row2.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row2, 0, row2.length));
  assertTrue(hasNext);
  // 4. scan out "row1" (2 kv)
  hasNext = scanner.next(currRow);
  assertEquals(2, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row1, 0, row1.length));
  assertTrue(hasNext);
  // 5. scan out "row0" (1 kv)
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row0, 0, row0.length));
  assertFalse(hasNext);
  HBaseTestingUtility.closeRegionAndWAL(this.region);
public void testReverseScanner_FromMemStoreAndHFiles_MultiCFs2() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] row2 = Bytes.toBytes("row2");
  byte[] row3 = Bytes.toBytes("row3");
  byte[] row4 = Bytes.toBytes("row4");
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[] cf2 = Bytes.toBytes("CF2");
  byte[] cf3 = Bytes.toBytes("CF3");
  byte[] cf4 = Bytes.toBytes("CF4");
  byte[][] families = { cf1, cf2, cf3, cf4 };
  byte[] col = Bytes.toBytes("C");
  HBaseConfiguration conf = new HBaseConfiguration();
  // disable compactions in this test.
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  this.region = initHRegion(tableName, method, conf, families);
  KeyValue kv1 = new KeyValue(row1, cf1, col, ts, KeyValue.Type.Put, null);
  KeyValue kv2 = new KeyValue(row2, cf2, col, ts, KeyValue.Type.Put, null);
  KeyValue kv3 = new KeyValue(row3, cf3, col, ts, KeyValue.Type.Put, null);
  KeyValue kv4 = new KeyValue(row4, cf4, col, ts, KeyValue.Type.Put, null);
  Put put = new Put(row1);
  put = new Put(row2);
  put = new Put(row3);
  put = new Put(row4);
  // scan range = ["row4", min)
  Scan scan = new Scan(row4);
  scan.setReversed(true);
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> currRow = new ArrayList<>();
  boolean hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row4, 0, row4.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row3, 0, row3.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row2, 0, row2.length));
  assertTrue(hasNext);
  hasNext = scanner.next(currRow);
  assertEquals(1, currRow.size());
  assertTrue(Bytes.equals(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
      currRow.get(0).getRowLength(), row1, 0, row1.length));
  assertFalse(hasNext);
  HBaseTestingUtility.closeRegionAndWAL(this.region);
/**
 * Test for HBASE-14497: Reverse Scan threw StackOverflow caused by readPt checking
 */
public void testReverseScanner_StackOverflow() throws IOException {
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[][] families = { cf1 };
  byte[] col = Bytes.toBytes("C");
  HBaseConfiguration conf = new HBaseConfiguration();
  this.region = initHRegion(tableName, method, conf, families);
  // setup with one storefile and one memstore, to create scanner and get an earlier readPt
  Put put = new Put(Bytes.toBytes("19998"));
  put.addColumn(cf1, col, Bytes.toBytes("val"));
  region.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  Put put2 = new Put(Bytes.toBytes("19997"));
  put2.addColumn(cf1, col, Bytes.toBytes("val"));
  Scan scan = new Scan(Bytes.toBytes("19998"));
  scan.setReversed(true);
  InternalScanner scanner = region.getScanner(scan);
  // create one storefile contains many rows will be skipped
  // to check StoreFileScanner.seekToPreviousRow
  for (int i = 10000; i < 20000; i++) {
    Put p = new Put(Bytes.toBytes("" + i));
    p.addColumn(cf1, col, Bytes.toBytes("" + i));
  }
  region.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  // create one memstore contains many rows will be skipped
  // to check MemStoreScanner.seekToPreviousRow
  for (int i = 10000; i < 20000; i++) {
    Put p = new Put(Bytes.toBytes("" + i));
    p.addColumn(cf1, col, Bytes.toBytes("" + i));
  }
  List<Cell> currRow = new ArrayList<>();
  hasNext = scanner.next(currRow);
  assertEquals(2, currRow.size());
  assertEquals("19998", Bytes.toString(currRow.get(0).getRowArray(),
      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
  assertEquals("19997", Bytes.toString(currRow.get(1).getRowArray(),
      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
  HBaseTestingUtility.closeRegionAndWAL(this.region);
public void testReverseScanShouldNotScanMemstoreIfReadPtLesser() throws Exception {
  byte[] cf1 = Bytes.toBytes("CF1");
  byte[][] families = { cf1 };
  byte[] col = Bytes.toBytes("C");
  HBaseConfiguration conf = new HBaseConfiguration();
  this.region = initHRegion(tableName, method, conf, families);
  // setup with one storefile and one memstore, to create scanner and get an earlier readPt
  Put put = new Put(Bytes.toBytes("19996"));
  put.addColumn(cf1, col, Bytes.toBytes("val"));
  Put put2 = new Put(Bytes.toBytes("19995"));
  put2.addColumn(cf1, col, Bytes.toBytes("val"));
  // create a reverse scan
  Scan scan = new Scan(Bytes.toBytes("19996"));
  scan.setReversed(true);
  RegionScannerImpl scanner = region.getScanner(scan);
  // flush the cache. This will reset the store scanner
  region.flushcache(true, true, FlushLifeCycleTracker.DUMMY);
  // create one memstore containing many rows that will be skipped,
  // to check MemStoreScanner.seekToPreviousRow
  for (int i = 10000; i < 20000; i++) {
    Put p = new Put(Bytes.toBytes("" + i));
    p.addColumn(cf1, col, Bytes.toBytes("" + i));
  }
  List<Cell> currRow = new ArrayList<>();
  boolean assertDone = false;
  hasNext = scanner.next(currRow);
  // With HBASE-15871, after the scanner is reset the memstore scanner should not be
  StoreScanner current =
      (StoreScanner) (scanner.storeHeap).getCurrentForTesting();
  List<KeyValueScanner> scanners = current.getAllScannersForTesting();
  assertEquals("There should be only one scanner the store file scanner", 1,
      scanners.size());
  assertEquals(2, currRow.size());
  assertEquals("19996", Bytes.toString(currRow.get(0).getRowArray(),
      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
  assertEquals("19995", Bytes.toString(currRow.get(1).getRowArray(),
      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
  HBaseTestingUtility.closeRegionAndWAL(this.region);
public void testWriteRequestsCounter() throws IOException {
  byte[] fam = Bytes.toBytes("info");
  byte[][] families = { fam };
  this.region = initHRegion(tableName, method, CONF, families);
  Assert.assertEquals(0L, region.getWriteRequestsCount());
  Put put = new Put(row);
  put.addColumn(fam, fam, fam);
  Assert.assertEquals(0L, region.getWriteRequestsCount());
  Assert.assertEquals(1L, region.getWriteRequestsCount());
  Assert.assertEquals(2L, region.getWriteRequestsCount());
  Assert.assertEquals(3L, region.getWriteRequestsCount());
  region.delete(new Delete(row));
  Assert.assertEquals(4L, region.getWriteRequestsCount());
  HBaseTestingUtility.closeRegionAndWAL(this.region);
public void testOpenRegionWrittenToWAL() throws Exception {
  final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor(fam1));
  htd.addFamily(new HColumnDescriptor(fam2));
  HRegionInfo hri = new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
  // open the region w/o rss and wal and flush some files
  region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  assertNotNull(region);
  // create a file in fam1 for the region before opening in OpenRegionHandler
  region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
  HBaseTestingUtility.closeRegionAndWAL(region);
  ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
  // capture append() calls
  WAL wal = mockWAL();
  when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
  region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
      TEST_UTIL.getConfiguration(), rss, null);
  verify(wal, times(1)).append((HRegionInfo) any(), (WALKeyImpl) any(),
      editCaptor.capture(), anyBoolean());
  WALEdit edit = editCaptor.getValue();
  assertNotNull(edit);
  assertNotNull(edit.getCells());
  assertEquals(1, edit.getCells().size());
  RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
  assertNotNull(desc);
  LOG.info("RegionEventDescriptor from WAL: " + desc);
  assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
  assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
  assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
      hri.getEncodedNameAsBytes()));
  assertTrue(desc.getLogSequenceNumber() > 0);
  assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
  assertEquals(2, desc.getStoresCount());
  StoreDescriptor store = desc.getStores(0);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
  assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
  assertEquals(1, store.getStoreFileCount()); // 1 store file
  assertFalse(store.getStoreFile(0).contains("/")); // ensure path is relative
  store = desc.getStores(1);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
  assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
  assertEquals(0, store.getStoreFileCount()); // no store files
  HBaseTestingUtility.closeRegionAndWAL(region);
// Helper for test testOpenRegionWrittenToWALForLogReplay
static class HRegionWithSeqId extends HRegion {
  public HRegionWithSeqId(final Path tableDir, final WAL wal, final FileSystem fs,
      final Configuration confParam, final RegionInfo regionInfo,
      final TableDescriptor htd, final RegionServerServices rsServices) {
    super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices);
  }

  protected long getNextSequenceId(WAL wal) throws IOException {

public void testFlushedFileWithNoTags() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(fam1));
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = TEST_UTIL.getDataTestDir(getClass().getSimpleName());
  region = HBaseTestingUtility.createRegionAndWAL(info, path, TEST_UTIL.getConfiguration(), htd);
  Put put = new Put(Bytes.toBytes("a-b-0-0"));
  put.addColumn(fam1, qual1, Bytes.toBytes("c1-value"));
  HStore store = region.getStore(fam1);
  Collection<HStoreFile> storefiles = store.getStorefiles();
  for (HStoreFile sf : storefiles) {
    assertFalse("Tags should not be present ",
        sf.getReader().getHFileReader().getFileContext().isIncludesTags());
  }

/**
 * Utility method to setup a WAL mock.
 * Needs to do the bit where we close latch on the WALKeyImpl on append else test hangs.
 * @throws IOException
 */
private WAL mockWAL() throws IOException {
  WAL wal = mock(WAL.class);
  Mockito.when(wal.append((HRegionInfo) Mockito.any(),
      (WALKeyImpl) Mockito.any(), (WALEdit) Mockito.any(), Mockito.anyBoolean())).
      thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
          WALKeyImpl key = invocation.getArgument(1);
          MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
          key.setWriteEntry(we);
          return 1L;
        }
      });
  return wal;
}
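// Note on mockWAL(): the stubbed append() begins an mvcc WriteEntry and attaches it to the
// WALKeyImpl, which is just enough of a real append for HRegion's open/close paths to make
// progress; without it, callers block on the key and the test hangs, as the javadoc warns.
// (The returned sequence id of 1L is only a placeholder for the stub.)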
public void testCloseRegionWrittenToWAL() throws Exception {
  Path rootDir = new Path(dir + name.getMethodName());
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
  final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42);
  final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  htd.addFamily(new HColumnDescriptor(fam1));
  htd.addFamily(new HColumnDescriptor(fam2));
  final HRegionInfo hri = new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
  ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
  // capture append() calls
  WAL wal = mockWAL();
  when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
  // create and then open a region first so that it can be closed later
  region = HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri));
  region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
      TEST_UTIL.getConfiguration(), rss, null);
  region.close(false);
  // 2 times, one for region open, the other close region
  verify(wal, times(2)).append((HRegionInfo) any(), (WALKeyImpl) any(),
      editCaptor.capture(), anyBoolean());
  WALEdit edit = editCaptor.getAllValues().get(1);
  assertNotNull(edit);
  assertNotNull(edit.getCells());
  assertEquals(1, edit.getCells().size());
  RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
  assertNotNull(desc);
  LOG.info("RegionEventDescriptor from WAL: " + desc);
  assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType());
  assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
  assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
      hri.getEncodedNameAsBytes()));
  assertTrue(desc.getLogSequenceNumber() > 0);
  assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
  assertEquals(2, desc.getStoresCount());
  StoreDescriptor store = desc.getStores(0);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
  assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
  assertEquals(0, store.getStoreFileCount()); // no store files
  store = desc.getStores(1);
  assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
  assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
  assertEquals(0, store.getStoreFileCount()); // no store files
/**
 * Test RegionTooBusyException thrown when region is busy
 */
public void testRegionTooBusy() throws IOException {
  byte[] family = Bytes.toBytes("family");
  long defaultBusyWaitDuration = CONF.getLong("hbase.busy.wait.duration",
      HRegion.DEFAULT_BUSY_WAIT_DURATION);
  CONF.setLong("hbase.busy.wait.duration", 1000);
  region = initHRegion(tableName, method, CONF, family);
  final AtomicBoolean stopped = new AtomicBoolean(true);
  Thread t = new Thread(new Runnable() {
      region.lock.writeLock().lock();
      while (!stopped.get()) {
      } catch (InterruptedException ie) {
      region.lock.writeLock().unlock();
  Get get = new Get(row);
    while (stopped.get()) {
    fail("Should throw RegionTooBusyException");
  } catch (InterruptedException ie) {
    fail("test interrupted");
  } catch (RegionTooBusyException e) {
  } catch (Throwable e) {
    HBaseTestingUtility.closeRegionAndWAL(region);
    CONF.setLong("hbase.busy.wait.duration", defaultBusyWaitDuration);
public void testCellTTLs() throws IOException {
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] q3 = Bytes.toBytes("q3");
  final byte[] q4 = Bytes.toBytes("q4");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  HColumnDescriptor hcd = new HColumnDescriptor(fam1);
  hcd.setTimeToLive(10); // 10 seconds
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  HRegion region = HBaseTestingUtility.createRegionAndWAL(new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY),
      TEST_UTIL.getDataTestDir(), conf, htd);
  assertNotNull(region);
  long now = EnvironmentEdgeManager.currentTime();
  // Add a cell that will expire in 5 seconds via cell TTL
  region.put(new Put(row).add(new KeyValue(row, fam1, q1, now,
      HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
          // TTL tags specify ts in milliseconds
          new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
  // Add a cell that will expire after 10 seconds via family setting
  region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
  // Add a cell that will expire in 15 seconds via cell TTL
  region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
      HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
          // TTL tags specify ts in milliseconds
          new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
  // Add a cell that will expire in 20 seconds via family setting
  region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
  // Flush so we are sure store scanning gets this right
  // A query at time T+0 should return all cells
  Result r = region.get(new Get(row));
  assertNotNull(r.getValue(fam1, q1));
  assertNotNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+5 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNotNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+10 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+15 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+20 seconds
  edge.incrementTime(10000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNull(r.getValue(fam1, q3));
  assertNull(r.getValue(fam1, q4));
  // Fun with disappearing increments
  region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
  r = region.get(new Get(row));
  byte[] val = r.getValue(fam1, q1);
  assertEquals(1L, Bytes.toLong(val));
  // Increment with a TTL of 5 seconds
  Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
  region.increment(incr); // 2
  // New value should be 2
  r = region.get(new Get(row));
  val = r.getValue(fam1, q1);
  assertEquals(2L, Bytes.toLong(val));
  // Increment time to T+25 seconds
  edge.incrementTime(5000);
  // Value should be back to 1
  r = region.get(new Get(row));
  val = r.getValue(fam1, q1);
  assertEquals(1L, Bytes.toLong(val));
  // Increment time to T+30 seconds
  edge.incrementTime(5000);
  // Original value written at T+20 should be gone now via family TTL
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  HBaseTestingUtility.closeRegionAndWAL(region);
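// Recap of the "disappearing increments" section above (descriptive only): the increment is
// applied with a 5 second cell TTL, so once the clock moves past T+25 the incremented cell
// expires and the base value of 1 written at T+20 is visible again; five seconds later the
// 10 second family TTL removes that base value as well, which is why the final get is null.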
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, method, CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  Increment inc = new Increment(row);
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(fam1, qual1, 1L);
  region.increment(inc);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  edge.setValue(1); // clock goes back
  region.increment(inc);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(11L, c.getTimestamp());
  assertEquals(2L, Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()));

public void testAppendTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, method, CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  Append a = new Append(row);
  a.setDurability(Durability.SKIP_WAL);
  a.addColumn(fam1, qual1, qual1);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  edge.setValue(1); // clock goes back
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(11L, c.getTimestamp());
  byte[] expected = new byte[qual1.length * 2];
  System.arraycopy(qual1, 0, expected, 0, qual1.length);
  System.arraycopy(qual1, 0, expected, qual1.length, qual1.length);
  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
      expected, 0, expected.length));

public void testCheckAndMutateTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, method, CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  Put p = new Put(row);
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual1);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  edge.setValue(1); // clock goes back
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual2);
  region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL,
      new BinaryComparator(qual1), p, false);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
      qual2, 0, qual2.length));
public void testBatchMutateWithWrongRegionException() throws Exception {
  final byte[] a = Bytes.toBytes("a");
  final byte[] b = Bytes.toBytes("b");
  final byte[] c = Bytes.toBytes("c"); // exclusive
  int prevLockTimeout = CONF.getInt("hbase.rowlock.wait.duration", 30000);
  CONF.setInt("hbase.rowlock.wait.duration", 1000);
  final HRegion region = initHRegion(tableName, a, c, method, CONF, false, fam1);
  Mutation[] mutations = new Mutation[] {
      .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          .setTimestamp(HConstants.LATEST_TIMESTAMP)
          .setType(Cell.Type.Put)
      // this is outside the region boundary
      new Put(c).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          .setTimestamp(HConstants.LATEST_TIMESTAMP)
      new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          .setTimestamp(HConstants.LATEST_TIMESTAMP)
          .setType(Cell.Type.Put)
  OperationStatus[] status = region.batchMutate(mutations);
  assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode());
  assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, status[1].getOperationStatusCode());
  assertEquals(OperationStatusCode.SUCCESS, status[2].getOperationStatusCode());
  // test with a row lock held for a long time
  final CountDownLatch obtainedRowLock = new CountDownLatch(1);
  ExecutorService exec = Executors.newFixedThreadPool(2);
  Future<Void> f1 = exec.submit(new Callable<Void>() {
    public Void call() throws Exception {
      LOG.info("Acquiring row lock");
      RowLock rl = region.getRowLock(b);
      obtainedRowLock.countDown();
      LOG.info("Waiting for 5 seconds before releasing lock");
      Threads.sleep(5000);
      LOG.info("Releasing row lock");
  obtainedRowLock.await(30, TimeUnit.SECONDS);
  Future<Void> f2 = exec.submit(new Callable<Void>() {
    public Void call() throws Exception {
      Mutation[] mutations = new Mutation[] {
          new Put(a).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
              .setTimestamp(HConstants.LATEST_TIMESTAMP)
              .setType(Cell.Type.Put)
          new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
              .setTimestamp(HConstants.LATEST_TIMESTAMP)
              .setType(Cell.Type.Put)
      // this will wait for the row lock, and it will eventually succeed
      OperationStatus[] status = region.batchMutate(mutations);
      assertEquals(OperationStatusCode.SUCCESS, status[0].getOperationStatusCode());
      assertEquals(OperationStatusCode.SUCCESS, status[1].getOperationStatusCode());
  CONF.setInt("hbase.rowlock.wait.duration", prevLockTimeout);
public void testCheckAndRowMutateTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, method, CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  Put p = new Put(row);
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual1);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  edge.setValue(1); // clock goes back
  p.setDurability(Durability.SKIP_WAL);
  p.addColumn(fam1, qual1, qual2);
  RowMutations rm = new RowMutations(row);
  assertTrue(region.checkAndRowMutate(row, fam1, qual1, CompareOperator.EQUAL,
      new BinaryComparator(qual1), rm, false));
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(10L, c.getTimestamp());
  LOG.info("c value " +
      Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
      qual2, 0, qual2.length));

HRegion initHRegion(TableName tableName, String callingMethod,
    byte[]... families) throws IOException {
  return initHRegion(tableName, callingMethod, HBaseConfiguration.create(),
      families);
}
/**
 * HBASE-16429 Make sure no stuck if roll writer when ring buffer is filled with appends
 * @throws IOException if IO error occurred during test
 */
public void testWritesWhileRollWriter() throws IOException {
  int numFamilies = 2;
  int numQualifiers = 2;
  final byte[][] families = new byte[numFamilies][];
  for (int i = 0; i < numFamilies; i++) {
    families[i] = Bytes.toBytes("family" + i);
  }
  final byte[][] qualifiers = new byte[numQualifiers][];
  for (int i = 0; i < numQualifiers; i++) {
    qualifiers[i] = Bytes.toBytes("qual" + i);
  }
  CONF.setInt("hbase.regionserver.wal.disruptor.event.count", 2);
  this.region = initHRegion(tableName, method, CONF, families);
  List<Thread> threads = new ArrayList<>();
  for (int i = 0; i < numRows; i++) {
    final int count = i;
    Thread t = new Thread(new Runnable() {
        byte[] row = Bytes.toBytes("row" + count);
        Put put = new Put(row);
        put.setDurability(Durability.SYNC_WAL);
        byte[] value = Bytes.toBytes(String.valueOf(count));
        for (byte[] family : families) {
          for (byte[] qualifier : qualifiers) {
            put.addColumn(family, qualifier, count, value);
          }
        }
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
  for (Thread t : threads) {
  }
  for (int i = 0; i < testCount; i++) {
    region.getWAL().rollWriter();
  }
  HBaseTestingUtility.closeRegionAndWAL(this.region);
  CONF.setInt("hbase.regionserver.wal.disruptor.event.count", 16 * 1024);
} catch (DroppedSnapshotException dse) {
  // We could get this on way out because we interrupt the background flusher and it could
  // fail anywhere causing a DSE over in the background flusher... only it is not properly
  // dealt with so could still be memory hanging out when we get to here -- memory we can't
  // flush because the accounting is 'off' since original DSE.
public void testMutateRow_WriteRequestCount() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");
  RowMutations rm = new RowMutations(row1);
  Put put = new Put(row1);
  put.addColumn(fam1, qf1, val1);
  this.region = initHRegion(tableName, method, CONF, fam1);
  long wrcBeforeMutate = this.region.writeRequestsCount.longValue();
  this.region.mutateRow(rm);
  long wrcAfterMutate = this.region.writeRequestsCount.longValue();
  Assert.assertEquals(wrcBeforeMutate + rm.getMutations().size(), wrcAfterMutate);
  HBaseTestingUtility.closeRegionAndWAL(this.region);
/**
 * The same as HRegion class, the only difference is that instantiateHStore will
 * create a different HStore - HStoreForTesting. [HBASE-8518]
 */
public static class HRegionForTesting extends HRegion {

  public HRegionForTesting(final Path tableDir, final WAL wal, final FileSystem fs,
      final Configuration confParam, final RegionInfo regionInfo,
      final TableDescriptor htd, final RegionServerServices rsServices) {
    this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo),
        wal, confParam, htd, rsServices);
  }

  public HRegionForTesting(HRegionFileSystem fs, WAL wal,
      Configuration confParam, TableDescriptor htd,
      RegionServerServices rsServices) {
    super(fs, wal, confParam, htd, rsServices);
  }

  /**
   * Create HStore instance.
   * @return If Mob is enabled, return HMobStore, otherwise return HStoreForTesting.
   */
  protected HStore instantiateHStore(final ColumnFamilyDescriptor family) throws IOException {
    if (family.isMobEnabled()) {
      if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
        throw new IOException("A minimum HFile version of "
            + HFile.MIN_FORMAT_VERSION_WITH_TAGS
            + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
      return new HMobStore(this, family, this.conf);
    }
    return new HStoreForTesting(this, family, this.conf);

/**
 * HStoreForTesting is merely the same as HStore, the difference is in the doCompaction method
 * of HStoreForTesting there is a checkpoint "hbase.hstore.compaction.complete" which
 * doesn't let hstore compaction complete. In the former edition, this config is set in
 * HStore class inside compact method, though this is just for testing, otherwise it
 * doesn't do any help. In HBASE-8518, we try to get rid of all "hbase.hstore.compaction.complete"
 * config (except for testing code).
 */
public static class HStoreForTesting extends HStore {

  protected HStoreForTesting(final HRegion region,
      final ColumnFamilyDescriptor family,
      final Configuration confParam) throws IOException {
    super(region, family, confParam);
  }

  protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
      Collection<HStoreFile> filesToCompact, User user, long compactionStartTime,
      List<Path> newFiles) throws IOException {
    // let compaction incomplete.
    if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
      LOG.warn("hbase.hstore.compaction.complete is set to false");
      List<HStoreFile> sfs = new ArrayList<>(newFiles.size());
      final boolean evictOnClose =
          cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
      for (Path newFile : newFiles) {
        // Create storefile around what we wrote with a reader on it.
        HStoreFile sf = createStoreFileAndReader(newFile);
        sf.closeStoreFile(evictOnClose);
      }
    }
    return super.doCompaction(cr, filesToCompact, user, compactionStartTime, newFiles);