/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 package org
.apache
.hadoop
.hbase
.master
;
21 import java
.io
.FileNotFoundException
;
22 import java
.io
.IOException
;
23 import org
.apache
.hadoop
.conf
.Configuration
;
24 import org
.apache
.hadoop
.fs
.FileStatus
;
25 import org
.apache
.hadoop
.fs
.FileSystem
;
26 import org
.apache
.hadoop
.fs
.Path
;
27 import org
.apache
.hadoop
.fs
.permission
.FsAction
;
28 import org
.apache
.hadoop
.fs
.permission
.FsPermission
;
29 import org
.apache
.hadoop
.hbase
.ClusterId
;
30 import org
.apache
.hadoop
.hbase
.HConstants
;
31 import org
.apache
.hadoop
.hbase
.backup
.HFileArchiver
;
32 import org
.apache
.hadoop
.hbase
.client
.RegionInfo
;
33 import org
.apache
.hadoop
.hbase
.exceptions
.DeserializationException
;
34 import org
.apache
.hadoop
.hbase
.fs
.HFileSystem
;
35 import org
.apache
.hadoop
.hbase
.log
.HBaseMarkers
;
36 import org
.apache
.hadoop
.hbase
.mob
.MobConstants
;
37 import org
.apache
.hadoop
.hbase
.replication
.ReplicationUtils
;
38 import org
.apache
.hadoop
.hbase
.security
.access
.SnapshotScannerHDFSAclHelper
;
39 import org
.apache
.hadoop
.hbase
.util
.Bytes
;
40 import org
.apache
.hadoop
.hbase
.util
.CommonFSUtils
;
41 import org
.apache
.hadoop
.hbase
.util
.FSUtils
;
42 import org
.apache
.yetus
.audience
.InterfaceAudience
;
43 import org
.slf4j
.Logger
;
44 import org
.slf4j
.LoggerFactory
;
/**
 * This class abstracts a bunch of operations the HMaster needs to interact with
 * the underlying file system like creating the initial layout, checking file
 * system status, etc.
 */
51 @InterfaceAudience.Private
52 public class MasterFileSystem
{
53 private static final Logger LOG
= LoggerFactory
.getLogger(MasterFileSystem
.class);
55 /** Parameter name for HBase instance root directory permission*/
56 public static final String HBASE_DIR_PERMS
= "hbase.rootdir.perms";
58 /** Parameter name for HBase WAL directory permission*/
59 public static final String HBASE_WAL_DIR_PERMS
= "hbase.wal.dir.perms";
61 // HBase configuration
62 private final Configuration conf
;
63 // Persisted unique cluster ID
64 private ClusterId clusterId
;
65 // Keep around for convenience.
66 private final FileSystem fs
;
67 // Keep around for convenience.
68 private final FileSystem walFs
;
69 // root log directory on the FS
70 private final Path rootdir
;
71 // hbase temp directory used for table construction and deletion
72 private final Path tempdir
;
73 // root hbase directory on the FS
74 private final Path walRootDir
;
78 * In a secure env, the protected sub-directories and files under the HBase rootDir
79 * would be restricted. The sub-directory will have '700' except the bulk load staging dir,
80 * which will have '711'. The default '700' can be overwritten by setting the property
81 * 'hbase.rootdir.perms'. The protected files (version file, clusterId file) will have '600'.
82 * The rootDir itself will be created with HDFS default permissions if it does not exist.
83 * We will check the rootDir permissions to make sure it has 'x' for all to ensure access
84 * to the staging dir. If it does not, we will add it.
86 // Permissions for the directories under rootDir that need protection
87 private final FsPermission secureRootSubDirPerms
;
88 // Permissions for the files under rootDir that need protection
89 private final FsPermission secureRootFilePerms
= new FsPermission("600");
90 // Permissions for bulk load staging directory under rootDir
91 private final FsPermission HiddenDirPerms
= FsPermission
.valueOf("-rwx--x--x");
93 private boolean isSecurityEnabled
;
95 public MasterFileSystem(Configuration conf
) throws IOException
{
97 // Set filesystem to be that of this.rootdir else we get complaints about
98 // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
99 // default localfs. Presumption is that rootdir is fully-qualified before
100 // we get to here with appropriate fs scheme.
101 this.rootdir
= CommonFSUtils
.getRootDir(conf
);
102 this.tempdir
= new Path(this.rootdir
, HConstants
.HBASE_TEMP_DIRECTORY
);
103 // Cover both bases, the old way of setting default fs and the new.
104 // We're supposed to run on 0.20 and 0.21 anyways.
105 this.fs
= this.rootdir
.getFileSystem(conf
);
106 this.walRootDir
= CommonFSUtils
.getWALRootDir(conf
);
107 this.walFs
= CommonFSUtils
.getWALFileSystem(conf
);
108 CommonFSUtils
.setFsDefault(conf
, new Path(this.walFs
.getUri()));
110 CommonFSUtils
.setFsDefault(conf
, new Path(this.fs
.getUri()));
111 // make sure the fs has the same conf
113 this.secureRootSubDirPerms
= new FsPermission(conf
.get("hbase.rootdir.perms", "700"));
114 this.isSecurityEnabled
= "kerberos".equalsIgnoreCase(conf
.get("hbase.security.authentication"));
115 // setup the filesystem variable
116 createInitialFileSystemLayout();
117 HFileSystem
.addLocationsOrderInterceptor(conf
);
121 * Create initial layout in filesystem.
123 * <li>Check if the meta region exists and is readable, if not create it.
124 * Create hbase.version and the hbase:meta directory if not one.
129 private void createInitialFileSystemLayout() throws IOException
{
130 final String
[] protectedSubDirs
= new String
[] {
131 HConstants
.BASE_NAMESPACE_DIR
,
132 HConstants
.HFILE_ARCHIVE_DIRECTORY
,
133 HConstants
.HBCK_SIDELINEDIR_NAME
,
134 MobConstants
.MOB_DIR_NAME
137 //With the introduction of RegionProcedureStore,
138 // there's no need to create MasterProcWAL dir here anymore. See HBASE-23715
139 final String
[] protectedSubLogDirs
= new String
[] {
140 HConstants
.HREGION_LOGDIR_NAME
,
141 HConstants
.HREGION_OLDLOGDIR_NAME
,
142 HConstants
.CORRUPT_DIR_NAME
,
143 ReplicationUtils
.REMOTE_WAL_DIR_NAME
145 // check if the root directory exists
146 checkRootDir(this.rootdir
, conf
, this.fs
);
148 // Check the directories under rootdir.
149 checkTempDir(this.tempdir
, conf
, this.fs
);
150 for (String subDir
: protectedSubDirs
) {
151 checkSubDir(new Path(this.rootdir
, subDir
), HBASE_DIR_PERMS
);
155 if (!this.walRootDir
.equals(this.rootdir
)) {
156 perms
= HBASE_WAL_DIR_PERMS
;
158 perms
= HBASE_DIR_PERMS
;
160 for (String subDir
: protectedSubLogDirs
) {
161 checkSubDir(new Path(this.walRootDir
, subDir
), perms
);
166 // Handle the last few special files and set the final rootDir permissions
167 // rootDir needs 'x' for all to support bulk load staging dir
168 if (isSecurityEnabled
) {
169 fs
.setPermission(new Path(rootdir
, HConstants
.VERSION_FILE_NAME
), secureRootFilePerms
);
170 fs
.setPermission(new Path(rootdir
, HConstants
.CLUSTER_ID_FILE_NAME
), secureRootFilePerms
);
172 FsPermission currentRootPerms
= fs
.getFileStatus(this.rootdir
).getPermission();
173 if (!currentRootPerms
.getUserAction().implies(FsAction
.EXECUTE
)
174 || !currentRootPerms
.getGroupAction().implies(FsAction
.EXECUTE
)
175 || !currentRootPerms
.getOtherAction().implies(FsAction
.EXECUTE
)) {
176 LOG
.warn("rootdir permissions do not contain 'excute' for user, group or other. "
177 + "Automatically adding 'excute' permission for all");
180 new FsPermission(currentRootPerms
.getUserAction().or(FsAction
.EXECUTE
), currentRootPerms
181 .getGroupAction().or(FsAction
.EXECUTE
), currentRootPerms
.getOtherAction().or(
186 public FileSystem
getFileSystem() {
190 public FileSystem
getWALFileSystem() {
194 public Configuration
getConfiguration() {
199 * @return HBase root dir.
201 public Path
getRootDir() {
206 * @return HBase root log dir.
208 public Path
getWALRootDir() {
209 return this.walRootDir
;
213 * @return the directory for a give {@code region}.
215 public Path
getRegionDir(RegionInfo region
) {
216 return FSUtils
.getRegionDirFromRootDir(getRootDir(), region
);
220 * @return HBase temp dir.
222 public Path
getTempDir() {
227 * @return The unique identifier generated for this cluster
229 public ClusterId
getClusterId() {
234 * Get the rootdir. Make sure its wholesome and exists before returning.
235 * @return hbase.rootdir (after checks for existence and bootstrapping if needed populating the
236 * directory with necessary bootup files).
238 private void checkRootDir(final Path rd
, final Configuration c
, final FileSystem fs
)
240 int threadWakeFrequency
= c
.getInt(HConstants
.THREAD_WAKE_FREQUENCY
, 10 * 1000);
241 // If FS is in safe mode wait till out of it.
242 FSUtils
.waitOnSafeMode(c
, threadWakeFrequency
);
244 // Filesystem is good. Go ahead and check for hbase.rootdir.
247 status
= fs
.getFileStatus(rd
);
248 } catch (FileNotFoundException e
) {
251 int versionFileWriteAttempts
= c
.getInt(HConstants
.VERSION_FILE_WRITE_ATTEMPTS
,
252 HConstants
.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS
);
254 if (status
== null) {
255 if (!fs
.mkdirs(rd
)) {
256 throw new IOException("Can not create configured '" + HConstants
.HBASE_DIR
+ "' " + rd
);
258 // DFS leaves safe mode with 0 DNs when there are 0 blocks.
259 // We used to handle this by checking the current DN count and waiting until
260 // it is nonzero. With security, the check for datanode count doesn't work --
261 // it is a privileged op. So instead we adopt the strategy of the jobtracker
262 // and simply retry file creation during bootstrap indefinitely. As soon as
263 // there is one datanode it will succeed. Permission problems should have
264 // already been caught by mkdirs above.
265 FSUtils
.setVersion(fs
, rd
, threadWakeFrequency
, versionFileWriteAttempts
);
267 if (!status
.isDirectory()) {
268 throw new IllegalArgumentException(
269 "Configured '" + HConstants
.HBASE_DIR
+ "' " + rd
+ " is not a directory.");
272 FSUtils
.checkVersion(fs
, rd
, true, threadWakeFrequency
, versionFileWriteAttempts
);
274 } catch (DeserializationException de
) {
275 LOG
.error(HBaseMarkers
.FATAL
, "Please fix invalid configuration for '{}' {}",
276 HConstants
.HBASE_DIR
, rd
, de
);
277 throw new IOException(de
);
278 } catch (IllegalArgumentException iae
) {
279 LOG
.error(HBaseMarkers
.FATAL
, "Please fix invalid configuration for '{}' {}",
280 HConstants
.HBASE_DIR
, rd
, iae
);
283 // Make sure cluster ID exists
284 if (!FSUtils
.checkClusterIdExists(fs
, rd
, threadWakeFrequency
)) {
285 FSUtils
.setClusterId(fs
, rd
, new ClusterId(), threadWakeFrequency
);
287 clusterId
= FSUtils
.getClusterId(fs
, rd
);
291 * Make sure the hbase temp directory exists and is empty.
292 * NOTE that this method is only executed once just after the master becomes the active one.
294 void checkTempDir(final Path tmpdir
, final Configuration c
, final FileSystem fs
)
296 // If the temp directory exists, clear the content (left over, from the previous run)
297 if (fs
.exists(tmpdir
)) {
298 // Archive table in temp, maybe left over from failed deletion,
299 // if not the cleaner will take care of them.
300 for (Path tableDir
: FSUtils
.getTableDirs(fs
, tmpdir
)) {
301 HFileArchiver
.archiveRegions(c
, fs
, this.rootdir
, tableDir
,
302 FSUtils
.getRegionDirs(fs
, tableDir
));
303 if (!FSUtils
.getRegionDirs(fs
, tableDir
).isEmpty()) {
304 LOG
.warn("Found regions in tmp dir after archiving table regions, {}", tableDir
);
307 // if acl sync to hdfs is enabled, then skip delete tmp dir because ACLs are set
308 if (!SnapshotScannerHDFSAclHelper
.isAclSyncToHdfsEnabled(c
) && !fs
.delete(tmpdir
, true)) {
309 throw new IOException("Unable to clean the temp directory: " + tmpdir
);
313 // Create the temp directory
314 if (!fs
.exists(tmpdir
)) {
315 if (isSecurityEnabled
) {
316 if (!fs
.mkdirs(tmpdir
, secureRootSubDirPerms
)) {
317 throw new IOException("HBase temp directory '" + tmpdir
+ "' creation failure.");
320 if (!fs
.mkdirs(tmpdir
)) {
321 throw new IOException("HBase temp directory '" + tmpdir
+ "' creation failure.");
328 * Make sure the directories under rootDir have good permissions. Create if necessary.
330 * @throws IOException
332 private void checkSubDir(final Path p
, final String dirPermsConfName
) throws IOException
{
333 FileSystem fs
= p
.getFileSystem(conf
);
334 FsPermission dirPerms
= new FsPermission(conf
.get(dirPermsConfName
, "700"));
336 if (isSecurityEnabled
) {
337 if (!fs
.mkdirs(p
, secureRootSubDirPerms
)) {
338 throw new IOException("HBase directory '" + p
+ "' creation failure.");
342 throw new IOException("HBase directory '" + p
+ "' creation failure.");
347 if (isSecurityEnabled
&& !dirPerms
.equals(fs
.getFileStatus(p
).getPermission())) {
348 // check whether the permission match
349 LOG
.warn("Found HBase directory permissions NOT matching expected permissions for "
350 + p
.toString() + " permissions=" + fs
.getFileStatus(p
).getPermission()
351 + ", expecting " + dirPerms
+ ". Automatically setting the permissions. "
352 + "You can change the permissions by setting \"" + dirPermsConfName
+ "\" in hbase-site.xml "
353 + "and restarting the master");
354 fs
.setPermission(p
, dirPerms
);
360 * Check permissions for bulk load staging directory. This directory has special hidden
361 * permissions. Create it if necessary.
362 * @throws IOException
364 private void checkStagingDir() throws IOException
{
365 Path p
= new Path(this.rootdir
, HConstants
.BULKLOAD_STAGING_DIR_NAME
);
367 if (!this.fs
.exists(p
)) {
368 if (!this.fs
.mkdirs(p
, HiddenDirPerms
)) {
369 throw new IOException("Failed to create staging directory " + p
.toString());
372 this.fs
.setPermission(p
, HiddenDirPerms
);
374 } catch (IOException e
) {
375 LOG
.error("Failed to create or set permission on staging directory " + p
.toString());
376 throw new IOException("Failed to create or set permission on staging directory "
381 public void deleteFamilyFromFS(RegionInfo region
, byte[] familyName
)
383 deleteFamilyFromFS(rootdir
, region
, familyName
);
386 public void deleteFamilyFromFS(Path rootDir
, RegionInfo region
, byte[] familyName
)
388 // archive family store files
389 Path tableDir
= CommonFSUtils
.getTableDir(rootDir
, region
.getTable());
390 HFileArchiver
.archiveFamily(fs
, conf
, region
, tableDir
, familyName
);
392 // delete the family folder
393 Path familyDir
= new Path(tableDir
,
394 new Path(region
.getEncodedName(), Bytes
.toString(familyName
)));
395 if (fs
.delete(familyDir
, true) == false) {
396 if (fs
.exists(familyDir
)) {
397 throw new IOException("Could not delete family "
398 + Bytes
.toString(familyName
) + " from FileSystem for region "
399 + region
.getRegionNameAsString() + "(" + region
.getEncodedName()
408 public void logFileSystemState(Logger log
) throws IOException
{
409 CommonFSUtils
.logFileSystemState(fs
, rootdir
, log
);