/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

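/**
 * Tests that space quota usage is reported with low latency from in-memory accounting of
 * flushes, compactions, bulk loads, and snapshots. The chores that compute sizes by scanning
 * the filesystem are configured with very long periods in {@link #setup()}, so any change in
 * usage these tests observe must come from the low-latency path.
 */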
@Category({ MediumTests.class })
public class TestLowLatencySpaceQuotas {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestLowLatencySpaceQuotas.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  // Global for all tests in the class
  private static final AtomicLong COUNTER = new AtomicLong(0);

  @Rule
  public TestName testName = new TestName();
  private SpaceQuotaHelperForTests helper;
  private Connection conn;
  private Admin admin;

  @BeforeClass
  public static void setup() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // The default 1s period for QuotaObserverChore is good.
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Set the period/delay to read region size from HDFS to be very long
    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000 * 120);
    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000 * 120);
    // Set the same long period/delay to compute snapshot sizes
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 1000 * 120);
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 1000 * 120);
    // Clean up the compacted files faster than normal (5s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 5 * 1000);

    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void removeAllQuotas() throws Exception {
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper.waitForQuotaTable(conn);
  }

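  // Note: each test sets a 1GB NO_INSERTS quota and writes only a few MB, so the quota is never
  // violated; the tests only exercise how quickly the reported usage catches up.
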
  @Test
  public void testFlushes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);

    // Write some data
    final long initialSize = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, initialSize);

    // Make sure a flush happened
    admin.flush(tn);

    // We should be able to observe the system recording an increase in size (even
    // though we know the filesystem scanning did not happen).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= initialSize;
      }
    });
  }

  @Test
  public void testMajorCompaction() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);

    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    // Write the same data again, flushing it to a second file
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    // After two flushes, both hfiles would contain similar data. We should see 2x the data.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= 2L * sizePerBatch;
      }
    });

    // Rewrite the two files into one.
    admin.majorCompact(tn);

    // After we major compact the table, we should notice quickly that the amount of data in the
    // table is much closer to reality (the duplicate entries across the two files are removed).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= sizePerBatch && snapshot.getUsage() <= 2L * sizePerBatch;
      }
    });
  }

  @Test
  public void testMinorCompaction() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);

    // Write some data and flush it to disk, one file per batch.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    final long numBatches = 6;
    for (long i = 0; i < numBatches; i++) {
      helper.writeData(tn, sizePerBatch);
      admin.flush(tn);
    }

    HRegion region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    long numFiles = getNumHFilesForRegion(region);
    assertEquals(numBatches, numFiles);

    // After numBatches flushes, each hfile contains similar data, so we should see
    // numBatches times the data.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= numFiles * sizePerBatch;
      }
    });

    // Rewrite some files into fewer
    TEST_UTIL.compact(tn, false);
    long numFilesAfterMinorCompaction = getNumHFilesForRegion(region);

    // After we minor compact the table, we should notice quickly that the amount of data in the
    // table is much closer to reality (the duplicate entries across the compacted files are
    // removed).
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= numFilesAfterMinorCompaction * sizePerBatch &&
            snapshot.getUsage() <= (numFilesAfterMinorCompaction + 1) * sizePerBatch;
      }
    });
  }

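  /** Returns the total number of HFiles across all Stores of the given region. */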
  private long getNumHFilesForRegion(HRegion region) {
    return region.getStores().stream().mapToLong((s) -> s.getNumHFiles()).sum();
  }

  @Test
  public void testBulkLoading() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);

    Map<byte[], List<Path>> family2Files = helper.generateFileToLoad(tn, 3, 550);
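    // (Presumably three store files with 550 rows each; see the helper's generateFileToLoad.)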
    // Make sure the files are about as long as we expect
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files = fs.listStatus(
        new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
    long totalSize = 0;
    for (FileStatus file : files) {
      assertTrue(
          "Expected the file, " + file.getPath() + ", length to be larger than 25KB, but was "
              + file.getLen(),
          file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
      totalSize += file.getLen();
    }

    assertFalse("The bulk load failed",
        BulkLoadHFiles.create(TEST_UTIL.getConfiguration()).bulkLoad(tn, family2Files).isEmpty());
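
    // Even though the filesystem-scanning chore is effectively disabled, the bulk load should be
    // reflected in the table's reported usage almost immediately.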
    final long finalTotalSize = totalSize;
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= finalTotalSize;
      }
    });
  }

  @Test
  public void testSnapshotSizes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
        tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);

    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);

    final String snapshot1 = "snapshot1";
    admin.snapshot(snapshot1, tn, SnapshotType.SKIPFLUSH);

    // Compute the size of the file for the Region we'll send to archive
    Region region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    List<? extends Store> stores = region.getStores();
    long summer = 0;
    for (Store store : stores) {
      summer += store.getStorefilesSize();
    }
    final long storeFileSize = summer;

    // Wait for the table to show the usage
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() == storeFileSize;
      }
    });

    // Spoof a "full" computation of snapshot size. Normally the chore handles this, but we want
    // to test in the absence of this chore.
    FileArchiverNotifier notifier = TEST_UTIL.getHBaseCluster().getMaster()
        .getSnapshotQuotaObserverChore().getNotifierForTable(tn);
    notifier.computeAndStoreSnapshotSizes(Collections.singletonList(snapshot1));

    // Force a major compaction to create a new file and push the old file to the archive
    TEST_UTIL.compact(tn, true);

    // After moving the old file to archive/, the space of this table should double:
    // the new file created by the major compaction is referenced by the table, and the snapshot
    // still references the old file.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
      @Override
      boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
        return snapshot.getUsage() >= 2 * storeFileSize;
      }
    });
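
    // The quota table should also record the snapshot's size, both for the snapshot itself and
    // rolled up into its namespace's total.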
    try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
      Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1));
      assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
      assertTrue(r.advance());
      assertEquals("The snapshot's size should be the same as the origin store file",
          storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));

      r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString()));
      assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
      assertTrue(r.advance());
      assertEquals("The namespace's snapshot size should be the same as the origin store file",
          storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));
    }
  }
}