#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Add or remove servers from draining mode via zookeeper
# Deprecated in 2.0, and will be removed in 3.0. Use the Admin decommission
# API instead.
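#
# A rough sketch of the replacement flow, using the HBase 2.x Java Admin
# interface from JRuby (the decommission/recommission calls follow that
# interface; the connection handling and server name below are illustrative,
# not part of this script):
#
#   connection = ConnectionFactory.createConnection(HBaseConfiguration.create)
#   admin = connection.getAdmin
#   target = org.apache.hadoop.hbase.ServerName.valueOf('host1.example.com,16020,1596000000000')
#   admin.decommissionRegionServers(java.util.Arrays.asList(target), true)  # true also offloads regions
#   admin.listDecommissionedRegionServers.each { |s| puts s.toString }
#   admin.recommissionRegionServer(target, java.util.Collections.emptyList)
#   admin.close
#   connection.close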
require 'optparse'

include Java

java_import org.apache.hadoop.hbase.HBaseConfiguration
java_import org.apache.hadoop.hbase.client.ConnectionFactory
java_import org.apache.hadoop.hbase.client.HBaseAdmin
java_import org.apache.hadoop.hbase.zookeeper.ZKUtil
java_import org.slf4j.LoggerFactory

# Name of this script
NAME = 'draining_servers'.freeze

# Do command-line parsing
options = {}
optparse = OptionParser.new do |opts|
  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
  opts.separator 'Add, remove, or list servers in draining mode. Accepts a hostname (to drain all region servers ' \
                 'on that host), a host:port pair, or a host,port,startCode triplet. More than one server can be ' \
                 'given, separated by spaces.'
  opts.on('-h', '--help', 'Display usage information') do
    puts opts
    exit
  end
end
optparse.parse!

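# Example invocations, following the usage banner above (host names are
# illustrative):
#
#   ./hbase org.jruby.Main draining_servers.rb add host1.example.com host2.example.com:16020
#   ./hbase org.jruby.Main draining_servers.rb remove host1.example.com,16020,1596000000000
#   ./hbase org.jruby.Main draining_servers.rb list
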
# Return an array of servernames, where each servername is the
# comma-delimited hostname,port,startcode form
def getServers(admin)
  serverInfos = admin.getClusterStatus.getServers
  servers = []
  for server in serverInfos
    servers << server.getServerName
  end
  servers
end

def getServerNames(hostOrServers, config)
  ret = []
  connection = ConnectionFactory.createConnection(config)
  admin = nil

  for hostOrServer in hostOrServers
    # check whether it is already a serverName; no need to connect to the cluster
    parts = hostOrServer.split(',')
    if parts.size == 3
      ret << hostOrServer
    else
      admin ||= connection.getAdmin
      servers = getServers(admin)
      # a host:port pair becomes the host,port prefix of a servername
      hostOrServer = hostOrServer.tr(':', ',')
      servers.each { |server| ret << server if server.start_with?(hostOrServer) }
    end
  end

  admin.close if admin
  connection.close
  ret
end

def addServers(_options, hostOrServers)
  config = HBaseConfiguration.create
  servers = getServerNames(hostOrServers, config)
  zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
  begin
    parentZnode = zkw.znodePaths.drainingZNode
    for server in servers
      node = ZKUtil.joinZNode(parentZnode, server)
      ZKUtil.createAndFailSilent(zkw, node)
    end
  ensure
    zkw.close
  end
end

def removeServers(_options, hostOrServers)
  config = HBaseConfiguration.create
  servers = getServerNames(hostOrServers, config)
  zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
  begin
    parentZnode = zkw.znodePaths.drainingZNode
    for server in servers
      node = ZKUtil.joinZNode(parentZnode, server)
      ZKUtil.deleteNodeFailSilent(zkw, node)
    end
  ensure
    zkw.close
  end
end

# list servers in draining mode
def listServers(_options)
  config = HBaseConfiguration.create
  zkw = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new(config, 'draining_servers', nil)
  begin
    parentZnode = zkw.znodePaths.drainingZNode
    servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
    servers.each { |server| puts server }
  ensure
    zkw.close
  end
end

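# With default settings, the znodes read and written above live under
# /hbase/draining (zookeeper.znode.parent plus zookeeper.znode.draining.rs),
# one child per draining server, e.g.:
#
#   /hbase/draining/host1.example.com,16020,1596000000000
#
# (path and server name shown are illustrative defaults, not read from the
# running configuration)
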
hostOrServers = ARGV[1..ARGV.size]

# Create a logger and save it to ruby global
$LOG = LoggerFactory.getLogger(NAME)

case ARGV[0]
when 'add'
  abort(optparse.to_s) if ARGV.length < 2
  addServers(options, hostOrServers)
when 'remove'
  abort(optparse.to_s) if ARGV.length < 2
  removeServers(options, hostOrServers)
when 'list'
  listServers(options)
else
  puts "Bad command: #{ARGV[0]}"
  puts optparse
  exit 1
end