/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Test our recoverLease loop against a mocked-up filesystem.
 */
@Category({MiscTests.class, MediumTests.class})
public class TestFSHDFSUtils {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestFSHDFSUtils.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestFSHDFSUtils.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
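  // Shorten the lease recovery pauses so the retry loops exercised below run quickly.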
  static {
    Configuration conf = HTU.getConfiguration();
    conf.setInt("hbase.lease.recovery.first.pause", 10);
    conf.setInt("hbase.lease.recovery.pause", 10);
  }

  private FSHDFSUtils fsHDFSUtils = new FSHDFSUtils();
  private static Path FILE = new Path(HTU.getDataTestDir(), "file.txt");
  long startTime = -1;

  @Before
  public void setup() {
    this.startTime = EnvironmentEdgeManager.currentTime();
  }

  /**
   * Test recover lease eventually succeeding.
   */
  @Test
  public void testRecoverLease() throws IOException {
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
    // Fail four times and pass on the fifth.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
    // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
    // invocations will happen pretty fast... then we fall into the longer wait loop).
    assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) >
      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
  }

  /**
   * Test that isFileClosed makes us recover lease faster.
   */
  @Test
  public void testIsFileClosed() throws IOException {
    // Make this time long so it is plain we broke out because of the isFileClosed invocation.
    HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000);
    CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
    Mockito.when(reporter.progress()).thenReturn(true);
    IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class);
    // Now make it so we fail the first two times -- the two fast invocations, then we fall into
    // the long loop during which we will call isFileClosed... the next invocation should
    // therefore return true if we are to break the loop.
    Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(true);
    Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true);
    assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
    Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE);
    Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE);
  }

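  /**
   * Runs the {@link FSHDFSUtils#isSameHdfs(Configuration, FileSystem, FileSystem)} assertions
   * against a namenode on the given port, skipping on Hadoop builds without HA support.
   */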
  void testIsSameHdfs(int nnport) throws IOException {
    try {
      Class<?> dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
      dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
    } catch (Exception e) {
      LOG.info("Skipping testIsSameHdfs: this Hadoop version does not support HA.");
      return;
    }

    Configuration conf = HBaseConfiguration.create();
    Path srcPath = new Path("hdfs://localhost:" + nnport + "/");
    Path desPath = new Path("hdfs://127.0.0.1/");
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = desPath.getFileSystem(conf);

    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
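
    // A different port or a different host must not be treated as the same HDFS instance.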
    desPath = new Path("hdfs://127.0.0.1:8070/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));

    desPath = new Path("hdfs://127.0.1.1:" + nnport + "/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
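
    // Exercise isSameHdfs against an HA logical URI; nn1 below points at the same
    // namenode as srcFs, so the two filesystems should compare as the same HDFS.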
    conf.set("fs.defaultFS", "hdfs://haosong-hadoop");
    conf.set("dfs.nameservices", "haosong-hadoop");
    conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2");
    conf.set("dfs.client.failover.proxy.provider.haosong-hadoop",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:" + nnport);
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
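
    // Swap the namenode addresses; the logical URI no longer resolves to srcFs's namenode.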
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:" + nnport);
    conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000");
    desPath = new Path("/");
    desFs = desPath.getFileSystem(conf);
    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
  }

  @Test
  public void testIsSameHdfs() throws IOException {
    String hadoopVersion = org.apache.hadoop.util.VersionInfo.getVersion();
    LOG.info("hadoop version is: " + hadoopVersion);
    boolean isHadoop3_0_0 = hadoopVersion.startsWith("3.0.0");
    if (isHadoop3_0_0) {
      // Hadoop 3.0.0 alpha1 through 3.0.0 GA changed the default NN port to 9820.
      // See HDFS-9427.
      testIsSameHdfs(9820);
    } else {
      // Pre-3.0.0 Hadoop defaults to port 8020, and Hadoop 3.0.1 changed it
      // back to 8020. See HDFS-12990.
      testIsSameHdfs(8020);
    }
  }

  /**
   * Version of DFS that has HDFS-4525 in it.
   */
  static class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
    /**
     * Close status of a file. Copied over from HDFS-4525.
     * @return true if file is already closed
     */
    @Override
    public boolean isFileClosed(Path f) throws IOException {
      return false;
    }
  }
}