HBASE-26921 Rewrite the counting cells part in TestMultiVersions (#4316)
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java (blob 80d62b88299e66dff6e282b5344b2e8a2a71c658)

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.Progressable;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({RegionServerTests.class, LargeTests.class})
public class TestHRegionFileSystem {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestHRegionFileSystem.class);

  private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  private static final Logger LOG = LoggerFactory.getLogger(TestHRegionFileSystem.class);

  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
  private static final byte[][] FAMILIES = {
    Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")),
    Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) };
  private static final TableName TABLE_NAME = TableName.valueOf("TestTable");

  @Rule
  public TestName name = new TestName();

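  /**
   * Verify that a store's block storage policy can be set three ways: through
   * HStore.BLOCK_STORAGE_POLICY_KEY in the cluster configuration, through the column family
   * descriptor, and through HRegionFileSystem#setStoragePolicy directly, and that flushed store
   * files and the store temp dir pick up the column family's policy.
   */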
  @Test
  public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    // the original block storage policy would be HOT
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);

    // Recreate table and make sure storage policy could be set through configuration
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);

    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertEquals("WARM", spA);
      assertEquals("WARM", spB);

      // alter table cf schema to change storage policies
      // and make sure it could override settings in conf
      ColumnFamilyDescriptorBuilder cfdA =
        ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
      // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
      cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
      admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
      while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().
          getRegionStates().hasRegionsInTransition()) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }
      // alter through HColumnDescriptor#setStoragePolicy
      ColumnFamilyDescriptorBuilder cfdB =
        ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
      cfdB.setStoragePolicy("ALL_SSD");
      admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
      while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
          .hasRegionsInTransition()) {
        Thread.sleep(200);
        LOG.debug("Waiting on table to finish schema altering");
      }

      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ONE_SSD", spA);
      assertNotNull(spB);
      assertEquals("ALL_SSD", spB);

      // flush memstore snapshot into 3 files
      for (long i = 0; i < 3; i++) {
        Put put = new Put(Bytes.toBytes(i));
        put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
        table.put(put);
        admin.flush(TABLE_NAME);
      }
      // there should be 3 files in store dir
      FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
      Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
      FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
      assertNotNull(storeFiles);
      assertEquals(3, storeFiles.length);
      // store temp dir still exists but empty
      Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
      assertTrue(fs.exists(storeTempDir));
      FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
      assertNull(tempFiles);
      // storage policy of cf temp dir and 3 store files should be ONE_SSD
      assertEquals("ONE_SSD",
        ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
      for (FileStatus status : storeFiles) {
        assertEquals("ONE_SSD",
          ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
      }

      // change storage policies by calling raw api directly
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
      regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
      spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
      spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
      LOG.debug("Storage policy of cf 0: [" + spA + "].");
      LOG.debug("Storage policy of cf 1: [" + spB + "].");
      assertNotNull(spA);
      assertEquals("ALL_SSD", spA);
      assertNotNull(spB);
      assertEquals("ONE_SSD", spB);
    } finally {
      table.close();
      TEST_UTIL.deleteTable(TABLE_NAME);
      TEST_UTIL.shutdownMiniCluster();
    }
  }

  // Locate the single region directory of the table on the DFS cluster and wrap it in an
  // HRegionFileSystem for the test to inspect.
  private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf)
      throws IOException {
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    Path tableDir = CommonFSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
    List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
    assertEquals(1, regionDirs.size());
    List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDirs.get(0));
    assertEquals(2, familyDirs.size());
    RegionInfo hri =
      conn.getRegionLocator(table.getName()).getAllRegionLocations().get(0).getRegion();
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, new HFileSystem(fs), tableDir, hri);
    return regionFs;
  }

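  /**
   * Create a region directory on the filesystem, verify the .regioninfo content that was written,
   * reopen the region from the filesystem, and finally delete the region directory.
   */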
  @Test
  public void testOnDiskRegionCreation() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName());
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);

    // Verify if the region is on disk
    Path regionDir = regionFs.getRegionDir();
    assertTrue("The region folder should be created", fs.exists(regionDir));

    // Verify the .regioninfo
    RegionInfo hriVerify = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    assertEquals(hri, hriVerify);

    // Open the region
    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri, false);
    assertEquals(regionDir, regionFs.getRegionDir());

    // Delete the region
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
      CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);
    assertFalse("The region folder should be removed", fs.exists(regionDir));

    fs.delete(rootDir, true);
  }

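  /**
   * The directory operations on HRegionFileSystem are expected to retry on IOException.
   * MockFileSystem fails the first successRetryCount attempts, so a true result shows that
   * createDir, rename and deleteDir kept retrying until the underlying call succeeded.
   */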
  @Test
  public void testNonIdempotentOpsWithRetries() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS(name.getMethodName());
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
    assertTrue(fs.exists(regionFs.getRegionDir()));

    regionFs = new HRegionFileSystem(conf, new MockFileSystemForCreate(), rootDir, hri);
    boolean result = regionFs.createDir(new Path("/foo/bar"));
    assertTrue("Couldn't create the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri);
    result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2"));
    assertTrue("Couldn't rename the directory", result);

    regionFs = new HRegionFileSystem(conf, new MockFileSystem(), rootDir, hri);
    result = regionFs.deleteDir(new Path("/foo/bar"));
    assertTrue("Couldn't delete the directory", result);
    fs.delete(rootDir, true);
  }

  static class MockFileSystemForCreate extends MockFileSystem {
    @Override
    public boolean exists(Path path) {
      return false;
    }
  }

  /**
   * A mock fs which throws an exception for the first 3 invocations, and then processes the call
   * (returns the expected result).
   */
  static class MockFileSystem extends FileSystem {
    int retryCount;
    final static int successRetryCount = 3;

    public MockFileSystem() {
      retryCount = 0;
    }

    @Override
    public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException {
      throw new IOException("");
    }

    @Override
    public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
        short arg4, long arg5, Progressable arg6) throws IOException {
      LOG.debug("Create, " + retryCount);
      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
      return null;
    }

    @Override
    public boolean delete(Path arg0) throws IOException {
      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
      return true;
    }

    @Override
    public boolean delete(Path arg0, boolean arg1) throws IOException {
      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
      return true;
    }

    @Override
    public FileStatus getFileStatus(Path arg0) throws IOException {
      FileStatus fs = new FileStatus();
      return fs;
    }

    @Override
    public boolean exists(Path path) {
      return true;
    }

    @Override
    public URI getUri() {
      throw new RuntimeException("Something bad happen");
    }

    @Override
    public Path getWorkingDirectory() {
      throw new RuntimeException("Something bad happen");
    }

    @Override
    public FileStatus[] listStatus(Path arg0) throws IOException {
      throw new IOException("Something bad happen");
    }

    @Override
    public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException {
      LOG.debug("mkdirs, " + retryCount);
      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
      return true;
    }

    @Override
    public FSDataInputStream open(Path arg0, int arg1) throws IOException {
      throw new IOException("Something bad happen");
    }

    @Override
    public boolean rename(Path arg0, Path arg1) throws IOException {
      LOG.debug("rename, " + retryCount);
      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
      return true;
    }

    @Override
    public void setWorkingDirectory(Path arg0) {
      throw new RuntimeException("Something bad happen");
    }
  }

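  /**
   * Create a file under the region's temporary directory and commit it into the store, checking
   * that the temporary file is gone after the commit and that getStoreFiles reports no store
   * files at each step.
   */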
  @Test
  public void testTempAndCommit() throws IOException {
    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();

    // Create a Region
    String familyName = "cf";

    RegionInfo hri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);

    // New region, no store files
    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // Create a new file in temp (no files in the family)
    Path buildPath = regionFs.createTempName();
    fs.createNewFile(buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);

    // commit the file
    Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
    storeFiles = regionFs.getStoreFiles(familyName);
    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
    assertFalse(fs.exists(buildPath));

    fs.delete(rootDir, true);
  }
}