/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.util.ShutdownHookManager;
import org.apache.hadoop.hbase.util.Threads;
/**
 * Manage regionserver shutdown hooks.
 * @see #install(Configuration, FileSystem, Stoppable, Thread)
 */
@InterfaceAudience.Private
public class ShutdownHook {
  private static final Logger LOG = LoggerFactory.getLogger(ShutdownHook.class);
  private static final String CLIENT_FINALIZER_DATA_METHOD = "clientFinalizer";
  /**
   * Key for boolean configuration whose default is true.
   */
  public static final String RUN_SHUTDOWN_HOOK = "hbase.shutdown.hook";
  /**
   * Key for a long configuration on how much time to wait on the fs shutdown
   * hook. Default is 30 seconds.
   */
  public static final String FS_SHUTDOWN_HOOK_WAIT = "hbase.fs.shutdown.hook.wait";
  /**
   * A place for keeping track of all the filesystem shutdown hooks that need
   * to be executed after the last regionserver referring to a given filesystem
   * stops. We keep track of the # of regionserver references in values of the map.
   */
  private final static Map<Runnable, Integer> fsShutdownHooks = new HashMap<>();
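  // The reference count matters when several regionservers share one JVM and one filesystem
  // (for example, a test mini cluster): the fs hook should run only after the last of them has
  // stopped. The count is incremented in suppressHdfsShutdownHook and decremented in
  // ShutdownHookThread#run.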
  /**
   * Install a shutdown hook that calls stop on the passed Stoppable
   * and then thread joins against the passed <code>threadToJoin</code>.
   * When this thread completes, it then runs the hdfs thread (This install
   * removes the hdfs shutdown hook keeping a handle on it to run it after
   * <code>threadToJoin</code> has stopped).
   *
   * <p>To suppress all shutdown hook handling -- both the running of the
   * regionserver hook and of the hdfs hook code -- set
   * {@link ShutdownHook#RUN_SHUTDOWN_HOOK} in {@link Configuration} to
   * <code>false</code>.
   * This configuration value is checked when the hook code runs.
   * @param conf the server configuration
   * @param fs Instance of Filesystem used by the RegionServer
   * @param stop Installed shutdown hook will call stop against this passed
   * <code>Stoppable</code> instance.
   * @param threadToJoin After calling stop on <code>stop</code> will then
   * join this thread.
   */
  public static void install(final Configuration conf, final FileSystem fs,
      final Stoppable stop, final Thread threadToJoin) {
    Runnable fsShutdownHook = suppressHdfsShutdownHook(fs);
    Thread t = new ShutdownHookThread(conf, stop, threadToJoin, fsShutdownHook);
    ShutdownHookManager.affixShutdownHook(t, 0);
    LOG.debug("Installed shutdown hook thread: " + t.getName());
  }
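  // Usage sketch (illustrative names, not part of this class): a server wires the hook up once
  // at startup, e.g.
  //   ShutdownHook.install(conf, FileSystem.get(conf), regionServer, regionServerThread);
  // where regionServer implements Stoppable and regionServerThread is the thread to join before
  // the suppressed hdfs hook is run.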
  /*
   * Thread run by shutdown hook.
   */
  private static class ShutdownHookThread extends Thread {
    private final Stoppable stop;
    private final Thread threadToJoin;
    private final Runnable fsShutdownHook;
    private final Configuration conf;

    ShutdownHookThread(final Configuration conf, final Stoppable stop,
        final Thread threadToJoin, final Runnable fsShutdownHook) {
      super("Shutdownhook:" + threadToJoin.getName());
      this.conf = conf;
      this.stop = stop;
      this.threadToJoin = threadToJoin;
      this.fsShutdownHook = fsShutdownHook;
    }
    @Override
    public void run() {
      boolean b = this.conf.getBoolean(RUN_SHUTDOWN_HOOK, true);
      LOG.info("Shutdown hook starting; " + RUN_SHUTDOWN_HOOK + "=" + b +
        "; fsShutdownHook=" + this.fsShutdownHook);
      if (b) {
        this.stop.stop("Shutdown hook");
        Threads.shutdown(this.threadToJoin);
        if (this.fsShutdownHook != null) {
          synchronized (fsShutdownHooks) {
            int refs = fsShutdownHooks.get(fsShutdownHook);
            if (refs == 1) {
              // Last regionserver referring to this filesystem: run the suppressed fs hook now.
              LOG.info("Starting fs shutdown hook thread.");
              Thread fsShutdownHookThread = (fsShutdownHook instanceof Thread) ?
                (Thread)fsShutdownHook : new Thread(fsShutdownHook,
                  fsShutdownHook.getClass().getSimpleName() + "-shutdown-hook");
              fsShutdownHookThread.start();
              Threads.shutdown(fsShutdownHookThread,
                this.conf.getLong(FS_SHUTDOWN_HOOK_WAIT, 30000));
            }
            if (refs > 0) {
              fsShutdownHooks.put(fsShutdownHook, refs - 1);
            }
          }
        }
      }
      LOG.info("Shutdown hook finished.");
    }
  }
  /*
   * So, HDFS keeps a static map of all FS instances. In order to make sure
   * things are cleaned up on our way out, it also creates a shutdown hook
   * so that all filesystems can be closed when the process is terminated; it
   * calls FileSystem.closeAll. This inconveniently runs concurrently with our
   * own shutdown handler, and therefore causes all the filesystems to be closed
   * before the server can do all its necessary cleanup.
   *
   * <p>The dirty reflection in this method sneaks into the FileSystem class
   * and grabs the shutdown hook, removes it from the list of active shutdown
   * hooks, and returns the hook for the caller to run at its convenience.
   *
   * <p>This seems quite fragile and susceptible to breaking if Hadoop changes
   * anything about the way this cleanup is managed. Keep an eye on things.
   * @return The fs shutdown hook
   * @throws RuntimeException if we fail to find or grab the shutdown hook.
   */
  private static Runnable suppressHdfsShutdownHook(final FileSystem fs) {
    try {
      // This introspection has been updated to work for hadoop 0.20, 0.21 and for
      // cloudera 0.20. 0.21 and cloudera 0.20 both have hadoop-4829. With the
      // latter in place, things are a little messy in that there are now two
      // instances of the data member clientFinalizer; an uninstalled one in
      // FileSystem and one in the inner class named Cache that actually gets
      // registered as a shutdown hook. If the latter is present, then we are
      // on 0.21 or cloudera patched 0.20.
      Runnable hdfsClientFinalizer = null;
      // Look into the FileSystem#Cache class for clientFinalizer
      Class<?> [] classes = FileSystem.class.getDeclaredClasses();
      Class<?> cache = null;
      for (Class<?> c : classes) {
        if (c.getSimpleName().equals("Cache")) {
          cache = c;
          break;
        }
      }

      if (cache == null) {
        throw new RuntimeException(
          "This should not happen. Could not find the cache class in FileSystem.");
      }
      Field field = null;
      try {
        field = cache.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
      } catch (NoSuchFieldException e) {
        // We can get here if the Cache class does not have a clientFinalizer
        // instance: i.e. we're running on straight 0.20 w/o hadoop-4829.
      }
      if (field != null) {
        field.setAccessible(true);
        Field cacheField = FileSystem.class.getDeclaredField("CACHE");
        cacheField.setAccessible(true);
        Object cacheInstance = cacheField.get(fs);
        hdfsClientFinalizer = (Runnable)field.get(cacheInstance);
      } else {
        // Then we didn't find clientFinalizer in Cache. Presume clean 0.20 hadoop.
        field = FileSystem.class.getDeclaredField(CLIENT_FINALIZER_DATA_METHOD);
        field.setAccessible(true);
        hdfsClientFinalizer = (Runnable)field.get(null);
      }
      if (hdfsClientFinalizer == null) {
        throw new RuntimeException("Client finalizer is null, can't suppress!");
      }
      synchronized (fsShutdownHooks) {
        boolean isFSCacheDisabled = fs.getConf().getBoolean("fs.hdfs.impl.disable.cache", false);
        if (!isFSCacheDisabled && !fsShutdownHooks.containsKey(hdfsClientFinalizer)
            && !ShutdownHookManager.deleteShutdownHook(hdfsClientFinalizer)) {
          throw new RuntimeException(
            "Failed suppression of fs shutdown hook: " + hdfsClientFinalizer);
        }
        Integer refs = fsShutdownHooks.get(hdfsClientFinalizer);
        fsShutdownHooks.put(hdfsClientFinalizer, refs == null ? 1 : refs + 1);
      }
      return hdfsClientFinalizer;
    } catch (NoSuchFieldException nsfe) {
      LOG.error(HBaseMarkers.FATAL, "Couldn't find field 'clientFinalizer' in FileSystem!",
        nsfe);
      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
    } catch (IllegalAccessException iae) {
      LOG.error(HBaseMarkers.FATAL, "Couldn't access field 'clientFinalizer' in FileSystem!",
        iae);
      throw new RuntimeException("Failed to suppress HDFS shutdown hook");
    }
  }
  // Thread that does nothing. Used in below main testing.
  static class DoNothingThread extends Thread {
  }
  // Stoppable with nothing to stop. Used below in main testing.
  static class DoNothingStoppable implements Stoppable {
    @Override
    public boolean isStopped() {
      // TODO Auto-generated method stub
      return false;
    }

    @Override
    public void stop(String why) {
      // TODO Auto-generated method stub
    }
  }
  /**
   * Main to test basic functionality. Run with clean hadoop 0.20 and hadoop
   * 0.21 and cloudera patched hadoop to make sure our shutdown hook handling
   * works for all combinations.
   * Pass '-Dhbase.shutdown.hook=false' to test turning off the running of
   * shutdown hooks.
   * @param args command line arguments (unused)
   * @throws IOException if the FileSystem cannot be instantiated
   */
  public static void main(final String [] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    String prop = System.getProperty(RUN_SHUTDOWN_HOOK);
    if (prop != null) {
      conf.setBoolean(RUN_SHUTDOWN_HOOK, Boolean.parseBoolean(prop));
    }
    // Instantiate a FileSystem. This will register the fs shutdown hook.
    FileSystem fs = FileSystem.get(conf);
    Thread donothing = new DoNothingThread();
    donothing.start();
    ShutdownHook.install(conf, fs, new DoNothingStoppable(), donothing);
  }
}