/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
57 * Confirm that the function of FlushLifeCycleTracker is OK as we do not use it in our own code.
59 @Category({ CoprocessorTests
.class, MediumTests
.class })
60 public class TestFlushLifeCycleTracker
{
63 public static final HBaseClassTestRule CLASS_RULE
=
64 HBaseClassTestRule
.forClass(TestFlushLifeCycleTracker
.class);
66 private static final HBaseTestingUtil UTIL
= new HBaseTestingUtil();
68 private static final TableName NAME
=
69 TableName
.valueOf(TestFlushLifeCycleTracker
.class.getSimpleName());
71 private static final byte[] CF
= Bytes
.toBytes("CF");
73 private static final byte[] QUALIFIER
= Bytes
.toBytes("CQ");
75 private HRegion region
;
77 private static FlushLifeCycleTracker TRACKER
;
79 private static volatile CountDownLatch ARRIVE
;
81 private static volatile CountDownLatch BLOCK
;
83 public static final class FlushObserver
implements RegionObserver
, RegionCoprocessor
{
86 public Optional
<RegionObserver
> getRegionObserver() {
87 return Optional
.of(this);
91 public void preFlush(ObserverContext
<RegionCoprocessorEnvironment
> c
,
92 FlushLifeCycleTracker tracker
) throws IOException
{
93 if (TRACKER
!= null) {
94 assertSame(tracker
, TRACKER
);
99 public InternalScanner
preFlush(ObserverContext
<RegionCoprocessorEnvironment
> c
, Store store
,
100 InternalScanner scanner
, FlushLifeCycleTracker tracker
) throws IOException
{
101 if (TRACKER
!= null) {
102 assertSame(tracker
, TRACKER
);
108 public void postFlush(ObserverContext
<RegionCoprocessorEnvironment
> c
,
109 FlushLifeCycleTracker tracker
) throws IOException
{
110 if (TRACKER
!= null) {
111 assertSame(tracker
, TRACKER
);
116 public void postFlush(ObserverContext
<RegionCoprocessorEnvironment
> c
, Store store
,
117 StoreFile resultFile
, FlushLifeCycleTracker tracker
) throws IOException
{
118 if (TRACKER
!= null) {
119 assertSame(tracker
, TRACKER
);
121 // inject here so we can make a flush request to fail because of we already have a flush
123 CountDownLatch arrive
= ARRIVE
;
124 if (arrive
!= null) {
128 } catch (InterruptedException e
) {
129 throw new InterruptedIOException();
135 private static final class Tracker
implements FlushLifeCycleTracker
{
137 private String reason
;
139 private boolean beforeExecutionCalled
;
141 private boolean afterExecutionCalled
;
143 private boolean completed
= false;
146 public synchronized void notExecuted(String reason
) {
147 this.reason
= reason
;
153 public void beforeExecution() {
154 this.beforeExecutionCalled
= true;
158 public synchronized void afterExecution() {
159 this.afterExecutionCalled
= true;
164 public synchronized void await() throws InterruptedException
{
172 public static void setUpBeforeClass() throws Exception
{
173 UTIL
.startMiniCluster(3);
177 public static void tearDownAfterClass() throws Exception
{
178 UTIL
.shutdownMiniCluster();
182 public void setUp() throws IOException
{
184 .createTable(TableDescriptorBuilder
.newBuilder(NAME
)
185 .setColumnFamily(ColumnFamilyDescriptorBuilder
.of(CF
))
186 .setCoprocessor(FlushObserver
.class.getName()).build());
187 region
= UTIL
.getHBaseCluster().getRegions(NAME
).get(0);
191 public void tearDown() throws IOException
{
194 UTIL
.deleteTable(NAME
);
198 public void test() throws IOException
, InterruptedException
{
199 try (Table table
= UTIL
.getConnection().getTable(NAME
)) {
200 for (int i
= 0; i
< 100; i
++) {
201 byte[] row
= Bytes
.toBytes(i
);
202 table
.put(new Put(row
, true)
203 .add(CellBuilderFactory
.create(CellBuilderType
.SHALLOW_COPY
)
206 .setQualifier(QUALIFIER
)
207 .setTimestamp(HConstants
.LATEST_TIMESTAMP
)
209 .setValue(Bytes
.toBytes(i
))
213 Tracker tracker
= new Tracker();
215 region
.requestFlush(tracker
);
217 assertNull(tracker
.reason
);
218 assertTrue(tracker
.beforeExecutionCalled
);
219 assertTrue(tracker
.afterExecutionCalled
);
221 // request flush on a region with empty memstore should still success
222 tracker
= new Tracker();
224 region
.requestFlush(tracker
);
226 assertNull(tracker
.reason
);
227 assertTrue(tracker
.beforeExecutionCalled
);
228 assertTrue(tracker
.afterExecutionCalled
);
232 public void testNotExecuted() throws IOException
, InterruptedException
{
233 try (Table table
= UTIL
.getConnection().getTable(NAME
)) {
234 for (int i
= 0; i
< 100; i
++) {
235 byte[] row
= Bytes
.toBytes(i
);
236 table
.put(new Put(row
, true)
237 .add(CellBuilderFactory
.create(CellBuilderType
.SHALLOW_COPY
)
240 .setQualifier(QUALIFIER
)
241 .setTimestamp(HConstants
.LATEST_TIMESTAMP
)
243 .setValue(Bytes
.toBytes(i
))
247 // here we may have overlap when calling the CP hooks so we do not assert on TRACKER
248 Tracker tracker1
= new Tracker();
249 ARRIVE
= new CountDownLatch(1);
250 BLOCK
= new CountDownLatch(1);
251 region
.requestFlush(tracker1
);
254 Tracker tracker2
= new Tracker();
255 region
.requestFlush(tracker2
);
257 assertNotNull(tracker2
.reason
);
258 assertFalse(tracker2
.beforeExecutionCalled
);
259 assertFalse(tracker2
.afterExecutionCalled
);
263 assertNull(tracker1
.reason
);
264 assertTrue(tracker1
.beforeExecutionCalled
);
265 assertTrue(tracker1
.afterExecutionCalled
);