/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.quotas.FileArchiverNotifierImpl.SnapshotWithSize;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile;

/**
 * Test class for {@link FileArchiverNotifierImpl}.
 */
@Category(MediumTests.class)
public class TestFileArchiverNotifierImpl {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFileArchiverNotifierImpl.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final AtomicLong COUNTER = new AtomicLong();

  @Rule
  public TestName testName = new TestName();

  private Connection conn;
  private Admin admin;
  private SpaceQuotaHelperForTests helper;
  private FileSystem fs;
  private Configuration conf;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    // Clean up the compacted files faster than normal (15s instead of 2mins)
    conf.setInt("hbase.hfile.compaction.discharger.interval", 15 * 1000);
    // Prevent the SnapshotQuotaObserverChore from running
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_DELAY_KEY, 60 * 60 * 1000);
    conf.setInt(SnapshotQuotaObserverChore.SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, 60 * 60 * 1000);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setup() throws Exception {
    conn = TEST_UTIL.getConnection();
    admin = TEST_UTIL.getAdmin();
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, COUNTER);
    helper.removeAllQuotas(conn);
    fs = TEST_UTIL.getTestFileSystem();
    conf = TEST_UTIL.getConfiguration();
  }

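  /** Verifies that snapshot size records can be persisted to a table and read back. */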
  @Test
  public void testSnapshotSizePersistence() throws IOException {
    final Admin admin = TEST_UTIL.getAdmin();
    final TableName tn = TableName.valueOf(testName.getMethodName());
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
      admin.deleteTable(tn);
    }
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(
        ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE)).build();
    admin.createTable(desc);

    FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);
    List<SnapshotWithSize> snapshotsWithSizes = new ArrayList<>();
    try (Table table = conn.getTable(tn)) {
      // Writing no values will result in no records written.
      verify(table, () -> {
        notifier.persistSnapshotSizes(table, snapshotsWithSizes);
        assertEquals(0, count(table));
      });

      verify(table, () -> {
        snapshotsWithSizes.add(new SnapshotWithSize("ss1", 1024L));
        snapshotsWithSizes.add(new SnapshotWithSize("ss2", 4096L));
        notifier.persistSnapshotSizes(table, snapshotsWithSizes);
        assertEquals(2, count(table));
        assertEquals(1024L, extractSnapshotSize(table, tn, "ss1"));
        assertEquals(4096L, extractSnapshotSize(table, tn, "ss2"));
      });
    }
  }

  @Test
  public void testIncrementalFileArchiving() throws Exception {
    final Admin admin = TEST_UTIL.getAdmin();
    final TableName tn = TableName.valueOf(testName.getMethodName());
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
      admin.deleteTable(tn);
    }
    final Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME);
    final TableName tn1 = helper.createTableWithRegions(1);
    admin.setQuota(QuotaSettingsFactory.limitTableSpace(
        tn1, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS));

    // Write some data and flush it
    helper.writeData(tn1, 256L * SpaceQuotaHelperForTests.ONE_KILOBYTE);
    admin.flush(tn1);

    // Create a snapshot on the table
    final String snapshotName1 = tn1 + "snapshot1";
    admin.snapshot(new SnapshotDescription(snapshotName1, tn1, SnapshotType.SKIPFLUSH));

    FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);
    long t1 = notifier.getLastFullCompute();
    long snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1));
    assertEquals("The size of the snapshots should be zero", 0, snapshotSize);
    assertTrue("Last compute time was not less than current compute time",
        t1 < notifier.getLastFullCompute());

    // No recently archived files and the snapshot should have no size
    assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

    // Invoke the addArchivedFiles method with no files
    notifier.addArchivedFiles(Collections.emptySet());

    // The size should not have changed
    assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

    notifier.addArchivedFiles(ImmutableSet.of(entry("a", 1024L), entry("b", 1024L)));

    // The size should not have changed
    assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));

    // Pull one file referenced by the snapshot out of the manifest
    Set<String> referencedFiles = getFilesReferencedBySnapshot(snapshotName1);
    assertTrue("Found snapshot referenced files: " + referencedFiles, referencedFiles.size() >= 1);
    String referencedFile = Iterables.getFirst(referencedFiles, null);
    assertNotNull(referencedFile);

    // Report that a file this snapshot referenced was moved to the archive. This is a sign
    // that the snapshot should now "own" the size of this file
    final long fakeFileSize = 2048L;
    notifier.addArchivedFiles(ImmutableSet.of(entry(referencedFile, fakeFileSize)));

    // Verify that the snapshot owns this file.
    assertEquals(fakeFileSize, extractSnapshotSize(quotaTable, tn, snapshotName1));

    // In reality, we did not actually move the file, so a "full" computation should re-set the
    // size of the snapshot back to 0.
    long t2 = notifier.getLastFullCompute();
    snapshotSize = notifier.computeAndStoreSnapshotSizes(Arrays.asList(snapshotName1));
    assertEquals(0, snapshotSize);
    assertEquals(0, extractSnapshotSize(quotaTable, tn, snapshotName1));
    // We should also have no recently archived files after a re-computation
    assertTrue("Last compute time was not less than current compute time",
        t2 < notifier.getLastFullCompute());
  }

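  /** Verifies that missing, zero, and non-zero namespace snapshot size records are parsed correctly. */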
  @Test
  public void testParseOldNamespaceSnapshotSize() throws Exception {
    final Admin admin = TEST_UTIL.getAdmin();
    final TableName fakeQuotaTableName = TableName.valueOf(testName.getMethodName());
    final TableName tn = TableName.valueOf(testName.getMethodName() + "1");
    if (admin.tableExists(fakeQuotaTableName)) {
      admin.disableTable(fakeQuotaTableName);
      admin.deleteTable(fakeQuotaTableName);
    }
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(fakeQuotaTableName).setColumnFamily(
        ColumnFamilyDescriptorBuilder.of(QuotaTableUtil.QUOTA_FAMILY_USAGE))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(QuotaUtil.QUOTA_FAMILY_INFO)).build();
    admin.createTable(desc);

    final String ns = "";
    try (Table fakeQuotaTable = conn.getTable(fakeQuotaTableName)) {
      FileArchiverNotifierImpl notifier = new FileArchiverNotifierImpl(conn, conf, fs, tn);
      // Verify no record is treated as zero
      assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));

      // Set an explicit value of zero
      fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 0L));
      assertEquals(0, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));

      // Set a non-zero value
      fakeQuotaTable.put(QuotaTableUtil.createPutForNamespaceSnapshotSize(ns, 1024L));
      assertEquals(1024L, notifier.getPreviousNamespaceSnapshotSize(fakeQuotaTable, ns));
    }
  }

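  /** Counts the number of cells in the given table by fully scanning it. */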
  private long count(Table t) throws IOException {
    try (ResultScanner rs = t.getScanner(new Scan())) {
      long sum = 0;
      for (Result r : rs) {
        while (r.advance()) {
          sum++;
        }
      }
      return sum;
    }
  }

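  /** Reads back the size recorded in the quota usage family for the given snapshot of the table. */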
  private long extractSnapshotSize(
      Table quotaTable, TableName tn, String snapshot) throws IOException {
    Get g = QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot);
    Result r = quotaTable.get(g);
    CellScanner cs = r.cellScanner();
    assertTrue(cs.advance());
    Cell c = cs.current();
    return QuotaTableUtil.extractSnapshotSize(
        c.getValueArray(), c.getValueOffset(), c.getValueLength());
  }

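  /** Truncates the given table, then runs the provided test body against the emptied table. */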
  private void verify(Table t, IOThrowingRunnable test) throws IOException {
    admin.disableTable(t.getName());
    admin.truncateTable(t.getName(), false);
    test.run();
  }

  private interface IOThrowingRunnable {
    void run() throws IOException;
  }

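  /** Opens the snapshot's manifest and collects the names of all store files it references. */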
  private Set<String> getFilesReferencedBySnapshot(String snapshotName) throws IOException {
    HashSet<String> files = new HashSet<>();
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
        snapshotName, FSUtils.getRootDir(conf));
    SnapshotProtos.SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(
        fs, snapshotDir);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
    // For each region referenced by the snapshot
    for (SnapshotRegionManifest rm : manifest.getRegionManifests()) {
      // For each column family in this region
      for (FamilyFiles ff : rm.getFamilyFilesList()) {
        // And each store file in that family
        for (StoreFile sf : ff.getStoreFilesList()) {
          files.add(sf.getName());
        }
      }
    }
    return files;
  }

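  /** Convenience for building an immutable map entry of archived file name to file size. */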
  private <K, V> Entry<K, V> entry(K k, V v) {
    return Maps.immutableEntry(k, v);
  }
}