HBASE-19498 Fix findbugs and error-prone warnings in hbase-client (branch-2)
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org.apache.hadoop.hbase.client;
20 import com.google.protobuf.Descriptors;
21 import com.google.protobuf.Message;
22 import com.google.protobuf.RpcController;
24 import java.io.Closeable;
25 import java.io.IOException;
26 import java.io.InterruptedIOException;
27 import java.nio.charset.StandardCharsets;
28 import java.util.ArrayList;
29 import java.util.Arrays;
30 import java.util.Collection;
31 import java.util.EnumSet;
32 import java.util.HashMap;
33 import java.util.Iterator;
34 import java.util.LinkedList;
35 import java.util.List;
36 import java.util.Map;
37 import java.util.Set;
38 import java.util.concurrent.Callable;
39 import java.util.concurrent.ExecutionException;
40 import java.util.concurrent.Future;
41 import java.util.concurrent.TimeUnit;
42 import java.util.concurrent.TimeoutException;
43 import java.util.concurrent.atomic.AtomicInteger;
44 import java.util.concurrent.atomic.AtomicReference;
45 import java.util.regex.Pattern;
46 import java.util.stream.Collectors;
47 import java.util.stream.Stream;
49 import org.apache.commons.logging.Log;
50 import org.apache.commons.logging.LogFactory;
51 import org.apache.hadoop.conf.Configuration;
52 import org.apache.hadoop.hbase.Abortable;
53 import org.apache.hadoop.hbase.CacheEvictionStats;
54 import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
55 import org.apache.hadoop.hbase.ClusterStatus;
56 import org.apache.hadoop.hbase.ClusterStatus.Option;
57 import org.apache.hadoop.hbase.DoNotRetryIOException;
58 import org.apache.hadoop.hbase.HBaseConfiguration;
59 import org.apache.hadoop.hbase.HConstants;
60 import org.apache.hadoop.hbase.HRegionInfo;
61 import org.apache.hadoop.hbase.HRegionLocation;
62 import org.apache.hadoop.hbase.HTableDescriptor;
63 import org.apache.hadoop.hbase.MasterNotRunningException;
64 import org.apache.hadoop.hbase.MetaTableAccessor;
65 import org.apache.hadoop.hbase.NamespaceDescriptor;
66 import org.apache.hadoop.hbase.NamespaceNotFoundException;
67 import org.apache.hadoop.hbase.NotServingRegionException;
68 import org.apache.hadoop.hbase.RegionLoad;
69 import org.apache.hadoop.hbase.RegionLocations;
70 import org.apache.hadoop.hbase.ServerName;
71 import org.apache.hadoop.hbase.TableExistsException;
72 import org.apache.hadoop.hbase.TableName;
73 import org.apache.hadoop.hbase.TableNotDisabledException;
74 import org.apache.hadoop.hbase.TableNotFoundException;
75 import org.apache.hadoop.hbase.UnknownRegionException;
76 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
77 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
78 import org.apache.hadoop.hbase.client.replication.TableCFs;
79 import org.apache.hadoop.hbase.client.security.SecurityCapability;
80 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
81 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
82 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
83 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
84 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
85 import org.apache.hadoop.hbase.quotas.QuotaFilter;
86 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
87 import org.apache.hadoop.hbase.quotas.QuotaSettings;
88 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
89 import org.apache.hadoop.hbase.replication.ReplicationException;
90 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
91 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
92 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
93 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
94 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
95 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
96 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
97 import org.apache.hadoop.hbase.util.Addressing;
98 import org.apache.hadoop.hbase.util.Bytes;
99 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
100 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
101 import org.apache.hadoop.hbase.util.Pair;
102 import org.apache.hadoop.ipc.RemoteException;
103 import org.apache.hadoop.util.StringUtils;
104 import org.apache.yetus.audience.InterfaceAudience;
105 import org.apache.yetus.audience.InterfaceStability;
106 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
107 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
108 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
109 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
110 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
111 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
112 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
113 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
114 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
115 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
116 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
117 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
118 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
119 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
120 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
121 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
122 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
123 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
124 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
125 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
126 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
127 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
128 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
129 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
130 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
131 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
132 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
133 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
134 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
135 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
136 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
137 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
138 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
139 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
140 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
141 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
142 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
143 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
144 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
145 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
146 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
147 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
148 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
149 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
150 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
151 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
152 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
153 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
154 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
155 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
156 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
157 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
158 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
159 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
160 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
161 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
162 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
163 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
164 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
165 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
166 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
167 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
168 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
169 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
170 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
171 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
172 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
173 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
174 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
175 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
176 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
177 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
178 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
179 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
180 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
181 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
182 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
183 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
184 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
185 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
186 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
187 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
188 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
189 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
190 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
191 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
192 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
193 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
194 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
195 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
196 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
197 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
198 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
199 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
200 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
201 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
202 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
203 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
204 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
205 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
/**
 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private, indicating that
 * this is an HBase-internal class as defined in
 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
 * There are no guarantees for backwards source / binary compatibility and methods or the class can
 * change or go away without deprecation.
 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
 * an HBaseAdmin directly.
 *
 * <p>Connection should be an <i>unmanaged</i> connection obtained via
 * {@link ConnectionFactory#createConnection(Configuration)}.
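 *
 * <p>A minimal usage sketch (the configuration and the table name are assumptions for
 * illustration, not part of this class):
 * <pre>
 * Configuration conf = HBaseConfiguration.create();
 * try (Connection connection = ConnectionFactory.createConnection(conf);
 *      Admin admin = connection.getAdmin()) {
 *   // Admin calls go to the cluster the connection points at; "test" is a placeholder table name.
 *   admin.tableExists(TableName.valueOf("test"));
 * }
 * </pre>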
 *
 * @see ConnectionFactory
 * @see Connection
 * @see Admin
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HBaseAdmin implements Admin {
226 private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
228 private ClusterConnection connection;
230 private volatile Configuration conf;
231 private final long pause;
232 private final int numRetries;
233 private final int syncWaitTimeout;
234 private boolean aborted;
235 private int operationTimeout;
236 private int rpcTimeout;
238 private RpcRetryingCallerFactory rpcCallerFactory;
239 private RpcControllerFactory rpcControllerFactory;
241 private NonceGenerator ng;
243 @Override
244 public int getOperationTimeout() {
245 return operationTimeout;
248 HBaseAdmin(ClusterConnection connection) throws IOException {
249 this.conf = connection.getConfiguration();
250 this.connection = connection;
252 // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
253 this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
254 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
255 this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
256 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
257 this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
258 HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
259 this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
260 HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
261 this.syncWaitTimeout = this.conf.getInt(
262 "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
264 this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
265 this.rpcControllerFactory = connection.getRpcControllerFactory();
267 this.ng = this.connection.getNonceGenerator();
270 @Override
271 public void abort(String why, Throwable e) {
272 // Currently does nothing but throw the passed message and exception
273 this.aborted = true;
274 throw new RuntimeException(why, e);
277 @Override
278 public boolean isAborted() {
279 return this.aborted;
282 @Override
283 public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
284 throws IOException {
285 return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
286 TimeUnit.MILLISECONDS);
289 @Override
290 public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
291 throws IOException {
292 Boolean abortProcResponse =
293 executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
294 getRpcControllerFactory()) {
295 @Override
296 protected AbortProcedureResponse rpcCall() throws Exception {
297 AbortProcedureRequest abortProcRequest =
298 AbortProcedureRequest.newBuilder().setProcId(procId).build();
299 return master.abortProcedure(getRpcController(), abortProcRequest);
301 }).getIsProcedureAborted();
302 return new AbortProcedureFuture(this, procId, abortProcResponse);
305 @Override
306 public List<TableDescriptor> listTableDescriptors() throws IOException {
307 return listTableDescriptors((Pattern)null, false);
310 @Override
311 public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
312 return listTableDescriptors(pattern, false);
315 @Override
316 public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
317 throws IOException {
318 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
319 getRpcControllerFactory()) {
320 @Override
321 protected List<TableDescriptor> rpcCall() throws Exception {
322 GetTableDescriptorsRequest req =
323 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
324 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
325 req));
330 @Override
331 public TableDescriptor getDescriptor(TableName tableName)
332 throws TableNotFoundException, IOException {
333 return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
334 operationTimeout, rpcTimeout);
337 @Override
338 public void modifyTable(TableDescriptor td) throws IOException {
339 get(modifyTableAsync(td), syncWaitTimeout, TimeUnit.MILLISECONDS);
342 @Override
343 public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
344 ModifyTableResponse response = executeCallable(
345 new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
346 @Override
347 protected ModifyTableResponse rpcCall() throws Exception {
348 setPriority(td.getTableName());
349 ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
350 td.getTableName(), td, ng.getNonceGroup(), ng.newNonce());
351 return master.modifyTable(getRpcController(), request);
354 return new ModifyTableFuture(this, td.getTableName(), response);
357 @Override
358 public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
359 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
360 getRpcControllerFactory()) {
361 @Override
362 protected List<TableDescriptor> rpcCall() throws Exception {
363 return master.listTableDescriptorsByNamespace(getRpcController(),
364 ListTableDescriptorsByNamespaceRequest.newBuilder()
365 .setNamespaceName(Bytes.toString(name)).build())
366 .getTableSchemaList()
367 .stream()
368 .map(ProtobufUtil::toTableDescriptor)
369 .collect(Collectors.toList());
374 @Override
375 public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
376 return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
377 getRpcControllerFactory()) {
378 @Override
379 protected List<TableDescriptor> rpcCall() throws Exception {
380 GetTableDescriptorsRequest req =
381 RequestConverter.buildGetTableDescriptorsRequest(tableNames);
382 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
383 req));
388 @Override
389 public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
390 AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
391 // TODO: There is no timeout on this controller. Set one!
392 HBaseRpcController controller = rpcControllerFactory.newController();
393 return ProtobufUtil.getOnlineRegions(controller, admin);
396 @Override
397 public List<RegionInfo> getRegions(TableName tableName) throws IOException {
398 if (TableName.isMetaTableName(tableName)) {
399 return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
400 } else {
401 return MetaTableAccessor.getTableRegions(connection, tableName, true);
405 private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
406 private boolean isAbortInProgress;
408 public AbortProcedureFuture(
409 final HBaseAdmin admin,
410 final Long procId,
411 final Boolean abortProcResponse) {
412 super(admin, procId);
413 this.isAbortInProgress = abortProcResponse;
416 @Override
417 public Boolean get(long timeout, TimeUnit unit)
418 throws InterruptedException, ExecutionException, TimeoutException {
419 if (!this.isAbortInProgress) {
420 return false;
422 super.get(timeout, unit);
423 return true;
427 /** @return Connection used by this object. */
428 @Override
429 public Connection getConnection() {
430 return connection;
433 @Override
434 public boolean tableExists(final TableName tableName) throws IOException {
435 return executeCallable(new RpcRetryingCallable<Boolean>() {
436 @Override
437 protected Boolean rpcCall(int callTimeout) throws Exception {
438 return MetaTableAccessor.tableExists(connection, tableName);
443 @Override
444 public HTableDescriptor[] listTables() throws IOException {
445 return listTables((Pattern)null, false);
448 @Override
449 public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
450 return listTables(pattern, false);
453 @Override
454 public HTableDescriptor[] listTables(String regex) throws IOException {
455 return listTables(Pattern.compile(regex), false);
458 @Override
459 public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
460 throws IOException {
461 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(),
462 getRpcControllerFactory()) {
463 @Override
464 protected HTableDescriptor[] rpcCall() throws Exception {
465 GetTableDescriptorsRequest req =
466 RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
467 return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
468 req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
473 @Override
474 public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
475 throws IOException {
476 return listTables(Pattern.compile(regex), includeSysTables);
479 @Override
480 public TableName[] listTableNames() throws IOException {
481 return listTableNames((Pattern)null, false);
484 @Override
485 public TableName[] listTableNames(Pattern pattern) throws IOException {
486 return listTableNames(pattern, false);
489 @Override
490 public TableName[] listTableNames(String regex) throws IOException {
491 return listTableNames(Pattern.compile(regex), false);
494 @Override
495 public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
496 throws IOException {
497 return executeCallable(new MasterCallable<TableName[]>(getConnection(),
498 getRpcControllerFactory()) {
499 @Override
500 protected TableName[] rpcCall() throws Exception {
501 GetTableNamesRequest req =
502 RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
503 return ProtobufUtil.getTableNameArray(master.getTableNames(getRpcController(), req)
504 .getTableNamesList());
509 @Override
510 public TableName[] listTableNames(final String regex, final boolean includeSysTables)
511 throws IOException {
512 return listTableNames(Pattern.compile(regex), includeSysTables);
515 @Override
516 public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
517 return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
518 operationTimeout, rpcTimeout);
521 static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
522 RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
523 int operationTimeout, int rpcTimeout) throws IOException {
524 if (tableName == null) return null;
525 TableDescriptor td =
526 executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
527 @Override
528 protected TableDescriptor rpcCall() throws Exception {
529 GetTableDescriptorsRequest req =
530 RequestConverter.buildGetTableDescriptorsRequest(tableName);
531 GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
532 if (!htds.getTableSchemaList().isEmpty()) {
533 return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
535 return null;
537 }, rpcCallerFactory, operationTimeout, rpcTimeout);
538 if (td != null) {
539 return td;
541 throw new TableNotFoundException(tableName.getNameAsString());
  /**
   * @deprecated since version 2.0 and will be removed in version 3.0. Use
   *             {@link #getTableDescriptor(TableName, Connection, RpcRetryingCallerFactory,
   *             RpcControllerFactory, int, int)} instead.
   */
549 @Deprecated
550 static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection,
551 RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
552 int operationTimeout, int rpcTimeout) throws IOException {
553 if (tableName == null) {
554 return null;
556 HTableDescriptor htd =
557 executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) {
558 @Override
559 protected HTableDescriptor rpcCall() throws Exception {
560 GetTableDescriptorsRequest req =
561 RequestConverter.buildGetTableDescriptorsRequest(tableName);
562 GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
563 if (!htds.getTableSchemaList().isEmpty()) {
564 return new ImmutableHTableDescriptor(
565 ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
567 return null;
569 }, rpcCallerFactory, operationTimeout, rpcTimeout);
570 if (htd != null) {
571 return new ImmutableHTableDescriptor(htd);
573 throw new TableNotFoundException(tableName.getNameAsString());
576 private long getPauseTime(int tries) {
577 int triesCount = tries;
578 if (triesCount >= HConstants.RETRY_BACKOFF.length) {
579 triesCount = HConstants.RETRY_BACKOFF.length - 1;
581 return this.pause * HConstants.RETRY_BACKOFF[triesCount];
584 @Override
585 public void createTable(TableDescriptor desc)
586 throws IOException {
587 createTable(desc, null);
590 @Override
591 public void createTable(TableDescriptor desc, byte [] startKey,
592 byte [] endKey, int numRegions)
593 throws IOException {
594 if(numRegions < 3) {
595 throw new IllegalArgumentException("Must create at least three regions");
596 } else if(Bytes.compareTo(startKey, endKey) >= 0) {
597 throw new IllegalArgumentException("Start key must be smaller than end key");
599 if (numRegions == 3) {
600 createTable(desc, new byte[][]{startKey, endKey});
601 return;
603 byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
604 if(splitKeys == null || splitKeys.length != numRegions - 1) {
605 throw new IllegalArgumentException("Unable to split key range into enough regions");
607 createTable(desc, splitKeys);
610 @Override
611 public void createTable(final TableDescriptor desc, byte [][] splitKeys)
612 throws IOException {
613 get(createTableAsync(desc, splitKeys), syncWaitTimeout, TimeUnit.MILLISECONDS);
616 @Override
617 public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
618 throws IOException {
619 if (desc.getTableName() == null) {
620 throw new IllegalArgumentException("TableName cannot be null");
622 if (splitKeys != null && splitKeys.length > 0) {
623 Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
624 // Verify there are no duplicate split keys
625 byte[] lastKey = null;
626 for (byte[] splitKey : splitKeys) {
627 if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
628 throw new IllegalArgumentException(
629 "Empty split key must not be passed in the split keys.");
631 if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
632 throw new IllegalArgumentException("All split keys must be unique, " +
633 "found duplicate: " + Bytes.toStringBinary(splitKey) +
634 ", " + Bytes.toStringBinary(lastKey));
636 lastKey = splitKey;
640 CreateTableResponse response = executeCallable(
641 new MasterCallable<CreateTableResponse>(getConnection(), getRpcControllerFactory()) {
642 @Override
643 protected CreateTableResponse rpcCall() throws Exception {
644 setPriority(desc.getTableName());
645 CreateTableRequest request = RequestConverter.buildCreateTableRequest(
646 desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
647 return master.createTable(getRpcController(), request);
650 return new CreateTableFuture(this, desc, splitKeys, response);
653 private static class CreateTableFuture extends TableFuture<Void> {
654 private final TableDescriptor desc;
655 private final byte[][] splitKeys;
657 public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc,
658 final byte[][] splitKeys, final CreateTableResponse response) {
659 super(admin, desc.getTableName(),
660 (response != null && response.hasProcId()) ? response.getProcId() : null);
661 this.splitKeys = splitKeys;
662 this.desc = desc;
665 @Override
666 protected TableDescriptor getTableDescriptor() {
667 return desc;
670 @Override
671 public String getOperationType() {
672 return "CREATE";
675 @Override
676 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
677 waitForTableEnabled(deadlineTs);
678 waitForAllRegionsOnline(deadlineTs, splitKeys);
679 return null;
683 @Override
684 public void deleteTable(final TableName tableName) throws IOException {
685 get(deleteTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
688 @Override
689 public Future<Void> deleteTableAsync(final TableName tableName) throws IOException {
690 DeleteTableResponse response = executeCallable(
691 new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) {
692 @Override
693 protected DeleteTableResponse rpcCall() throws Exception {
694 setPriority(tableName);
695 DeleteTableRequest req =
696 RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
697 return master.deleteTable(getRpcController(), req);
700 return new DeleteTableFuture(this, tableName, response);
703 private static class DeleteTableFuture extends TableFuture<Void> {
704 public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
705 final DeleteTableResponse response) {
706 super(admin, tableName,
707 (response != null && response.hasProcId()) ? response.getProcId() : null);
710 @Override
711 public String getOperationType() {
712 return "DELETE";
715 @Override
716 protected Void waitOperationResult(final long deadlineTs)
717 throws IOException, TimeoutException {
718 waitTableNotFound(deadlineTs);
719 return null;
722 @Override
723 protected Void postOperationResult(final Void result, final long deadlineTs)
724 throws IOException, TimeoutException {
725 // Delete cached information to prevent clients from using old locations
726 ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
727 return super.postOperationResult(result, deadlineTs);
731 @Override
732 public HTableDescriptor[] deleteTables(String regex) throws IOException {
733 return deleteTables(Pattern.compile(regex));
  /**
   * Delete tables matching the passed in pattern and wait on completion.
   *
   * Warning: Use this method carefully, there is no prompting and the effect is
   * immediate. Consider using {@link #listTables(java.util.regex.Pattern)} and
   * {@link #deleteTable(TableName)} instead.
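   *
   * <p>A minimal sketch of the per-table alternative suggested above (the {@code admin} handle
   * and the pattern are assumptions for illustration):
   * <pre>
   * // "tmp_.*" is a placeholder pattern; each matching table is deleted individually.
   * for (HTableDescriptor htd : admin.listTables(Pattern.compile("tmp_.*"))) {
   *   admin.deleteTable(htd.getTableName());
   * }
   * </pre>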
   *
   * @param pattern The pattern to match table names against
   * @return Table descriptors for tables that couldn't be deleted
   * @throws IOException if a remote or network exception occurs
   */
747 @Override
748 public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
749 List<HTableDescriptor> failed = new LinkedList<>();
750 for (HTableDescriptor table : listTables(pattern)) {
751 try {
752 deleteTable(table.getTableName());
753 } catch (IOException ex) {
754 LOG.info("Failed to delete table " + table.getTableName(), ex);
755 failed.add(table);
758 return failed.toArray(new HTableDescriptor[failed.size()]);
761 @Override
762 public void truncateTable(final TableName tableName, final boolean preserveSplits)
763 throws IOException {
764 get(truncateTableAsync(tableName, preserveSplits), syncWaitTimeout, TimeUnit.MILLISECONDS);
767 @Override
768 public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
769 throws IOException {
770 TruncateTableResponse response =
771 executeCallable(new MasterCallable<TruncateTableResponse>(getConnection(),
772 getRpcControllerFactory()) {
773 @Override
774 protected TruncateTableResponse rpcCall() throws Exception {
775 setPriority(tableName);
776 LOG.info("Started truncating " + tableName);
777 TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
778 tableName, preserveSplits, ng.getNonceGroup(), ng.newNonce());
779 return master.truncateTable(getRpcController(), req);
782 return new TruncateTableFuture(this, tableName, preserveSplits, response);
785 private static class TruncateTableFuture extends TableFuture<Void> {
786 private final boolean preserveSplits;
788 public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName,
789 final boolean preserveSplits, final TruncateTableResponse response) {
790 super(admin, tableName,
791 (response != null && response.hasProcId()) ? response.getProcId() : null);
792 this.preserveSplits = preserveSplits;
795 @Override
796 public String getOperationType() {
797 return "TRUNCATE";
800 @Override
801 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
802 waitForTableEnabled(deadlineTs);
803 // once the table is enabled, we know the operation is done. so we can fetch the splitKeys
804 byte[][] splitKeys = preserveSplits ? getAdmin().getTableSplits(getTableName()) : null;
805 waitForAllRegionsOnline(deadlineTs, splitKeys);
806 return null;
810 private byte[][] getTableSplits(final TableName tableName) throws IOException {
811 byte[][] splits = null;
812 try (RegionLocator locator = getConnection().getRegionLocator(tableName)) {
813 byte[][] startKeys = locator.getStartKeys();
814 if (startKeys.length == 1) {
815 return splits;
817 splits = new byte[startKeys.length - 1][];
818 for (int i = 1; i < startKeys.length; i++) {
819 splits[i - 1] = startKeys[i];
822 return splits;
825 @Override
826 public void enableTable(final TableName tableName)
827 throws IOException {
828 get(enableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
831 @Override
832 public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
833 TableName.isLegalFullyQualifiedTableName(tableName.getName());
834 EnableTableResponse response = executeCallable(
835 new MasterCallable<EnableTableResponse>(getConnection(), getRpcControllerFactory()) {
836 @Override
837 protected EnableTableResponse rpcCall() throws Exception {
838 setPriority(tableName);
839 LOG.info("Started enable of " + tableName);
840 EnableTableRequest req =
841 RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
842 return master.enableTable(getRpcController(),req);
845 return new EnableTableFuture(this, tableName, response);
848 private static class EnableTableFuture extends TableFuture<Void> {
849 public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
850 final EnableTableResponse response) {
851 super(admin, tableName,
852 (response != null && response.hasProcId()) ? response.getProcId() : null);
855 @Override
856 public String getOperationType() {
857 return "ENABLE";
860 @Override
861 protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
862 waitForTableEnabled(deadlineTs);
863 return null;
867 @Override
868 public HTableDescriptor[] enableTables(String regex) throws IOException {
869 return enableTables(Pattern.compile(regex));
872 @Override
873 public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
874 List<HTableDescriptor> failed = new LinkedList<>();
875 for (HTableDescriptor table : listTables(pattern)) {
876 if (isTableDisabled(table.getTableName())) {
877 try {
878 enableTable(table.getTableName());
879 } catch (IOException ex) {
880 LOG.info("Failed to enable table " + table.getTableName(), ex);
881 failed.add(table);
885 return failed.toArray(new HTableDescriptor[failed.size()]);
888 @Override
889 public void disableTable(final TableName tableName)
890 throws IOException {
891 get(disableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
894 @Override
895 public Future<Void> disableTableAsync(final TableName tableName) throws IOException {
896 TableName.isLegalFullyQualifiedTableName(tableName.getName());
897 DisableTableResponse response = executeCallable(
898 new MasterCallable<DisableTableResponse>(getConnection(), getRpcControllerFactory()) {
899 @Override
900 protected DisableTableResponse rpcCall() throws Exception {
901 setPriority(tableName);
902 LOG.info("Started disable of " + tableName);
903 DisableTableRequest req =
904 RequestConverter.buildDisableTableRequest(
905 tableName, ng.getNonceGroup(), ng.newNonce());
906 return master.disableTable(getRpcController(), req);
909 return new DisableTableFuture(this, tableName, response);
912 private static class DisableTableFuture extends TableFuture<Void> {
913 public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
914 final DisableTableResponse response) {
915 super(admin, tableName,
916 (response != null && response.hasProcId()) ? response.getProcId() : null);
919 @Override
920 public String getOperationType() {
921 return "DISABLE";
924 @Override
925 protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
926 waitForTableDisabled(deadlineTs);
927 return null;
931 @Override
932 public HTableDescriptor[] disableTables(String regex) throws IOException {
933 return disableTables(Pattern.compile(regex));
936 @Override
937 public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
938 List<HTableDescriptor> failed = new LinkedList<>();
939 for (HTableDescriptor table : listTables(pattern)) {
940 if (isTableEnabled(table.getTableName())) {
941 try {
942 disableTable(table.getTableName());
943 } catch (IOException ex) {
944 LOG.info("Failed to disable table " + table.getTableName(), ex);
945 failed.add(table);
949 return failed.toArray(new HTableDescriptor[failed.size()]);
952 @Override
953 public boolean isTableEnabled(final TableName tableName) throws IOException {
954 checkTableExists(tableName);
955 return executeCallable(new RpcRetryingCallable<Boolean>() {
956 @Override
957 protected Boolean rpcCall(int callTimeout) throws Exception {
958 TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
959 if (tableState == null) {
960 throw new TableNotFoundException(tableName);
962 return tableState.inStates(TableState.State.ENABLED);
967 @Override
968 public boolean isTableDisabled(TableName tableName) throws IOException {
969 checkTableExists(tableName);
970 return connection.isTableDisabled(tableName);
973 @Override
974 public boolean isTableAvailable(TableName tableName) throws IOException {
975 return connection.isTableAvailable(tableName, null);
978 @Override
979 public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
980 return connection.isTableAvailable(tableName, splitKeys);
983 @Override
984 public Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException {
985 return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection(),
986 getRpcControllerFactory()) {
987 @Override
988 protected Pair<Integer, Integer> rpcCall() throws Exception {
989 setPriority(tableName);
990 GetSchemaAlterStatusRequest req = RequestConverter
991 .buildGetSchemaAlterStatusRequest(tableName);
992 GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req);
993 Pair<Integer, Integer> pair = new Pair<>(ret.getYetToUpdateRegions(),
994 ret.getTotalRegions());
995 return pair;
1000 @Override
1001 public Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException {
1002 return getAlterStatus(TableName.valueOf(tableName));
1005 @Override
1006 public void addColumnFamily(final TableName tableName, final ColumnFamilyDescriptor columnFamily)
1007 throws IOException {
1008 get(addColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
1011 @Override
1012 public Future<Void> addColumnFamilyAsync(final TableName tableName,
1013 final ColumnFamilyDescriptor columnFamily) throws IOException {
1014 AddColumnResponse response =
1015 executeCallable(new MasterCallable<AddColumnResponse>(getConnection(),
1016 getRpcControllerFactory()) {
1017 @Override
1018 protected AddColumnResponse rpcCall() throws Exception {
1019 setPriority(tableName);
1020 AddColumnRequest req =
1021 RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
1022 ng.newNonce());
1023 return master.addColumn(getRpcController(), req);
1026 return new AddColumnFamilyFuture(this, tableName, response);
1029 private static class AddColumnFamilyFuture extends ModifyTableFuture {
1030 public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1031 final AddColumnResponse response) {
1032 super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
1033 : null);
1036 @Override
1037 public String getOperationType() {
1038 return "ADD_COLUMN_FAMILY";
  /**
   * {@inheritDoc}
   * @deprecated Since 2.0. Will be removed in 3.0. Use
   *             {@link #deleteColumnFamily(TableName, byte[])} instead.
   */
1047 @Override
1048 @Deprecated
1049 public void deleteColumn(final TableName tableName, final byte[] columnFamily)
1050 throws IOException {
1051 deleteColumnFamily(tableName, columnFamily);
1054 @Override
1055 public void deleteColumnFamily(final TableName tableName, final byte[] columnFamily)
1056 throws IOException {
1057 get(deleteColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
1060 @Override
1061 public Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily)
1062 throws IOException {
1063 DeleteColumnResponse response =
1064 executeCallable(new MasterCallable<DeleteColumnResponse>(getConnection(),
1065 getRpcControllerFactory()) {
1066 @Override
1067 protected DeleteColumnResponse rpcCall() throws Exception {
1068 setPriority(tableName);
1069 DeleteColumnRequest req =
1070 RequestConverter.buildDeleteColumnRequest(tableName, columnFamily,
1071 ng.getNonceGroup(), ng.newNonce());
1072 return master.deleteColumn(getRpcController(), req);
1075 return new DeleteColumnFamilyFuture(this, tableName, response);
1078 private static class DeleteColumnFamilyFuture extends ModifyTableFuture {
1079 public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1080 final DeleteColumnResponse response) {
1081 super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
1082 : null);
1085 @Override
1086 public String getOperationType() {
1087 return "DELETE_COLUMN_FAMILY";
1091 @Override
1092 public void modifyColumnFamily(final TableName tableName,
1093 final ColumnFamilyDescriptor columnFamily) throws IOException {
1094 get(modifyColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
1097 @Override
1098 public Future<Void> modifyColumnFamilyAsync(final TableName tableName,
1099 final ColumnFamilyDescriptor columnFamily) throws IOException {
1100 ModifyColumnResponse response =
1101 executeCallable(new MasterCallable<ModifyColumnResponse>(getConnection(),
1102 getRpcControllerFactory()) {
1103 @Override
1104 protected ModifyColumnResponse rpcCall() throws Exception {
1105 setPriority(tableName);
1106 ModifyColumnRequest req =
1107 RequestConverter.buildModifyColumnRequest(tableName, columnFamily,
1108 ng.getNonceGroup(), ng.newNonce());
1109 return master.modifyColumn(getRpcController(), req);
1112 return new ModifyColumnFamilyFuture(this, tableName, response);
1115 private static class ModifyColumnFamilyFuture extends ModifyTableFuture {
1116 public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
1117 final ModifyColumnResponse response) {
1118 super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
1119 : null);
1122 @Override
1123 public String getOperationType() {
1124 return "MODIFY_COLUMN_FAMILY";
1128 @Deprecated
1129 @Override
1130 public void closeRegion(final String regionName, final String unused) throws IOException {
1131 unassign(Bytes.toBytes(regionName), true);
1134 @Deprecated
1135 @Override
1136 public void closeRegion(final byte [] regionName, final String unused) throws IOException {
1137 unassign(regionName, true);
1140 @Deprecated
1141 @Override
1142 public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1143 final String unused) throws IOException {
1144 unassign(Bytes.toBytes(encodedRegionName), true);
1145 return true;
1148 @Deprecated
1149 @Override
1150 public void closeRegion(final ServerName unused, final HRegionInfo hri) throws IOException {
1151 unassign(hri.getRegionName(), true);
  /**
   * @param sn the region server name
   * @return List of {@link HRegionInfo}.
   * @throws IOException if a remote or network exception occurs
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #getRegions(ServerName)}.
   */
1161 @Deprecated
1162 @Override
1163 public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1164 return getRegions(sn).stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
1167 @Override
1168 public void flush(final TableName tableName) throws IOException {
1169 checkTableExists(tableName);
1170 if (isTableDisabled(tableName)) {
1171 LOG.info("Table is disabled: " + tableName.getNameAsString());
1172 return;
1174 execProcedure("flush-table-proc", tableName.getNameAsString(), new HashMap<>());
1177 @Override
1178 public void flushRegion(final byte[] regionName) throws IOException {
1179 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1180 if (regionServerPair == null) {
1181 throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1183 if (regionServerPair.getSecond() == null) {
1184 throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1186 final RegionInfo hRegionInfo = regionServerPair.getFirst();
1187 ServerName serverName = regionServerPair.getSecond();
1188 final AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
1189 Callable<Void> callable = new Callable<Void>() {
1190 @Override
1191 public Void call() throws Exception {
1192 // TODO: There is no timeout on this controller. Set one!
1193 HBaseRpcController controller = rpcControllerFactory.newController();
1194 FlushRegionRequest request =
1195 RequestConverter.buildFlushRegionRequest(hRegionInfo.getRegionName());
1196 admin.flushRegion(controller, request);
1197 return null;
1200 ProtobufUtil.call(callable);
  /**
   * {@inheritDoc}
   */
1206 @Override
1207 public void compact(final TableName tableName)
1208 throws IOException {
1209 compact(tableName, null, false, CompactType.NORMAL);
1212 @Override
1213 public void compactRegion(final byte[] regionName)
1214 throws IOException {
1215 compactRegion(regionName, null, false);
  /**
   * {@inheritDoc}
   */
1221 @Override
1222 public void compact(final TableName tableName, final byte[] columnFamily)
1223 throws IOException {
1224 compact(tableName, columnFamily, false, CompactType.NORMAL);
  /**
   * {@inheritDoc}
   */
1230 @Override
1231 public void compactRegion(final byte[] regionName, final byte[] columnFamily)
1232 throws IOException {
1233 compactRegion(regionName, columnFamily, false);
1236 @Override
1237 public void compactRegionServer(final ServerName serverName) throws IOException {
1238 for (RegionInfo region : getRegions(serverName)) {
1239 compact(this.connection.getAdmin(serverName), region, false, null);
1243 @Override
1244 public void majorCompactRegionServer(final ServerName serverName) throws IOException {
1245 for (RegionInfo region : getRegions(serverName)) {
1246 compact(this.connection.getAdmin(serverName), region, true, null);
1250 @Override
1251 public void majorCompact(final TableName tableName)
1252 throws IOException {
1253 compact(tableName, null, true, CompactType.NORMAL);
1256 @Override
1257 public void majorCompactRegion(final byte[] regionName)
1258 throws IOException {
1259 compactRegion(regionName, null, true);
  /**
   * {@inheritDoc}
   */
1265 @Override
1266 public void majorCompact(final TableName tableName, final byte[] columnFamily)
1267 throws IOException {
1268 compact(tableName, columnFamily, true, CompactType.NORMAL);
1271 @Override
1272 public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
1273 throws IOException {
1274 compactRegion(regionName, columnFamily, true);
  /**
   * Compact a table.
   * Asynchronous operation.
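   *
   * <p>A brief sketch of the public calls that delegate to this helper (the {@code admin} handle,
   * the table name, and the family name are assumptions for illustration):
   * <pre>
   * admin.compact(TableName.valueOf("t1"));                       // minor compaction, all families
   * admin.compact(TableName.valueOf("t1"), Bytes.toBytes("cf"));  // minor compaction, one family
   * admin.majorCompact(TableName.valueOf("t1"));                  // major compaction, all families
   * </pre>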
   *
   * @param tableName table or region to compact
   * @param columnFamily column family within a table or region
   * @param major True if we are to do a major compaction.
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if a remote or network exception occurs
   */
1287 private void compact(final TableName tableName, final byte[] columnFamily,final boolean major,
1288 CompactType compactType) throws IOException {
1289 switch (compactType) {
1290 case MOB:
1291 compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
1292 major, columnFamily);
1293 break;
1294 case NORMAL:
1295 checkTableExists(tableName);
1296 for (HRegionLocation loc :connection.locateRegions(tableName, false, false)) {
1297 ServerName sn = loc.getServerName();
1298 if (sn == null) {
1299 continue;
1301 try {
1302 compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily);
1303 } catch (NotServingRegionException e) {
1304 if (LOG.isDebugEnabled()) {
1305 LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() +
1306 ": " + StringUtils.stringifyException(e));
1310 break;
1311 default:
1312 throw new IllegalArgumentException("Unknown compactType: " + compactType);
1317 * Compact an individual region.
1318 * Asynchronous operation.
1320 * @param regionName region to compact
1321 * @param columnFamily column family within a table or region
1322 * @param major True if we are to do a major compaction.
1323 * @throws IOException if a remote or network exception occurs
1324 * @throws InterruptedException
1326 private void compactRegion(final byte[] regionName, final byte[] columnFamily,
1327 final boolean major) throws IOException {
1328 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1329 if (regionServerPair == null) {
1330 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1332 if (regionServerPair.getSecond() == null) {
1333 throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1335 compact(this.connection.getAdmin(regionServerPair.getSecond()), regionServerPair.getFirst(),
1336 major, columnFamily);
1339 private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major,
1340 byte[] family) throws IOException {
1341 Callable<Void> callable = new Callable<Void>() {
1342 @Override
1343 public Void call() throws Exception {
1344 // TODO: There is no timeout on this controller. Set one!
1345 HBaseRpcController controller = rpcControllerFactory.newController();
1346 CompactRegionRequest request =
1347 RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
1348 admin.compactRegion(controller, request);
1349 return null;
1352 ProtobufUtil.call(callable);
1355 @Override
1356 public void move(final byte[] encodedRegionName, final byte[] destServerName) throws IOException {
1357 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1358 @Override
1359 protected Void rpcCall() throws Exception {
1360 setPriority(encodedRegionName);
1361 MoveRegionRequest request =
1362 RequestConverter.buildMoveRegionRequest(encodedRegionName,
1363 destServerName != null ? ServerName.valueOf(Bytes.toString(destServerName)) : null);
1364 master.moveRegion(getRpcController(), request);
1365 return null;
1370 @Override
1371 public void assign(final byte [] regionName) throws MasterNotRunningException,
1372 ZooKeeperConnectionException, IOException {
1373 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1374 @Override
1375 protected Void rpcCall() throws Exception {
1376 setPriority(regionName);
1377 AssignRegionRequest request =
1378 RequestConverter.buildAssignRegionRequest(getRegionName(regionName));
1379 master.assignRegion(getRpcController(), request);
1380 return null;
1385 @Override
1386 public void unassign(final byte [] regionName, final boolean force) throws IOException {
1387 final byte[] toBeUnassigned = getRegionName(regionName);
1388 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1389 @Override
1390 protected Void rpcCall() throws Exception {
1391 setPriority(regionName);
1392 UnassignRegionRequest request =
1393 RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
1394 master.unassignRegion(getRpcController(), request);
1395 return null;
1400 @Override
1401 public void offline(final byte [] regionName)
1402 throws IOException {
1403 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
1404 @Override
1405 protected Void rpcCall() throws Exception {
1406 setPriority(regionName);
1407 master.offlineRegion(getRpcController(),
1408 RequestConverter.buildOfflineRegionRequest(regionName));
1409 return null;
1414 @Override
1415 public boolean balancerSwitch(final boolean on, final boolean synchronous)
1416 throws IOException {
1417 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1418 @Override
1419 protected Boolean rpcCall() throws Exception {
1420 SetBalancerRunningRequest req =
1421 RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
1422 return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue();
1427 @Override
1428 public boolean balance() throws IOException {
1429 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1430 @Override
1431 protected Boolean rpcCall() throws Exception {
1432 return master.balance(getRpcController(),
1433 RequestConverter.buildBalanceRequest(false)).getBalancerRan();
1438 @Override
1439 public boolean balance(final boolean force) throws IOException {
1440 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1441 @Override
1442 protected Boolean rpcCall() throws Exception {
1443 return master.balance(getRpcController(),
1444 RequestConverter.buildBalanceRequest(force)).getBalancerRan();
1449 @Override
1450 public boolean isBalancerEnabled() throws IOException {
1451 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1452 @Override
1453 protected Boolean rpcCall() throws Exception {
1454 return master.isBalancerEnabled(getRpcController(),
1455 RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
1461 * {@inheritDoc}
1463 @Override
1464 public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException {
1465 checkTableExists(tableName);
1466 CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder();
1467 List<Pair<RegionInfo, ServerName>> pairs =
1468 MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
1469 Map<ServerName, List<RegionInfo>> regionInfoByServerName =
1470 pairs.stream()
1471 .filter(pair -> !(pair.getFirst().isOffline()))
1472 .filter(pair -> pair.getSecond() != null)
1473 .collect(Collectors.groupingBy(pair -> pair.getSecond(),
1474 Collectors.mapping(pair -> pair.getFirst(), Collectors.toList())));
1476 for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
1477 CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue());
1478 cacheEvictionStats = cacheEvictionStats.append(stats);
1479 if (stats.getExceptionCount() > 0) {
1480 for (Map.Entry<byte[], Throwable> exception : stats.getExceptions().entrySet()) {
1481 LOG.debug("Failed to clear block cache for "
1482 + Bytes.toStringBinary(exception.getKey())
1483 + " on " + entry.getKey() + ": ", exception.getValue());
1487 return cacheEvictionStats.build();
1490 private CacheEvictionStats clearBlockCache(final ServerName sn, final List<RegionInfo> hris)
1491 throws IOException {
1492 HBaseRpcController controller = rpcControllerFactory.newController();
1493 AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1494 ClearRegionBlockCacheRequest request =
1495 RequestConverter.buildClearRegionBlockCacheRequest(hris);
1496 ClearRegionBlockCacheResponse response;
1497 try {
1498 response = admin.clearRegionBlockCache(controller, request);
1499 return ProtobufUtil.toCacheEvictionStats(response.getStats());
1500 } catch (ServiceException se) {
1501 throw ProtobufUtil.getRemoteException(se);
1506 * Invoke region normalizer. The normalizer may decline to run for various reasons; check the master logs.
1508 * @return True if region normalizer ran, false otherwise.
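 * <p>A minimal usage sketch (assumes an open {@code Admin} handle named {@code admin};
 * the handle itself is not part of this class):
 * <pre>{@code
 * if (!admin.normalize()) {
 *   // the normalizer was skipped; see the master log for the reason
 * }
 * }</pre>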
1510 @Override
1511 public boolean normalize() throws IOException {
1512 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1513 @Override
1514 protected Boolean rpcCall() throws Exception {
1515 return master.normalize(getRpcController(),
1516 RequestConverter.buildNormalizeRequest()).getNormalizerRan();
1521 @Override
1522 public boolean isNormalizerEnabled() throws IOException {
1523 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1524 @Override
1525 protected Boolean rpcCall() throws Exception {
1526 return master.isNormalizerEnabled(getRpcController(),
1527 RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
1532 @Override
1533 public boolean normalizerSwitch(final boolean on) throws IOException {
1534 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1535 @Override
1536 protected Boolean rpcCall() throws Exception {
1537 SetNormalizerRunningRequest req =
1538 RequestConverter.buildSetNormalizerRunningRequest(on);
1539 return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue();
1544 @Override
1545 public boolean catalogJanitorSwitch(final boolean enable) throws IOException {
1546 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1547 @Override
1548 protected Boolean rpcCall() throws Exception {
1549 return master.enableCatalogJanitor(getRpcController(),
1550 RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
1555 @Override
1556 public int runCatalogJanitor() throws IOException {
1557 return executeCallable(new MasterCallable<Integer>(getConnection(), getRpcControllerFactory()) {
1558 @Override
1559 protected Integer rpcCall() throws Exception {
1560 return master.runCatalogScan(getRpcController(),
1561 RequestConverter.buildCatalogScanRequest()).getScanResult();
1566 @Override
1567 public boolean isCatalogJanitorEnabled() throws IOException {
1568 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1569 @Override
1570 protected Boolean rpcCall() throws Exception {
1571 return master.isCatalogJanitorEnabled(getRpcController(),
1572 RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
1577 @Override
1578 public boolean cleanerChoreSwitch(final boolean on) throws IOException {
1579 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1580 @Override public Boolean rpcCall() throws Exception {
1581 return master.setCleanerChoreRunning(getRpcController(),
1582 RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue();
1587 @Override
1588 public boolean runCleanerChore() throws IOException {
1589 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1590 @Override public Boolean rpcCall() throws Exception {
1591 return master.runCleanerChore(getRpcController(),
1592 RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan();
1597 @Override
1598 public boolean isCleanerChoreEnabled() throws IOException {
1599 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
1600 @Override public Boolean rpcCall() throws Exception {
1601 return master.isCleanerChoreEnabled(getRpcController(),
1602 RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue();
1608 * Merge two regions. Synchronous operation.
1609 * Note: It is not feasible to predict how long a merge will take.
1610 * Therefore, this is for internal testing only.
1611 * @param nameOfRegionA encoded or full name of region a
1612 * @param nameOfRegionB encoded or full name of region b
1613 * @param forcible true to force the merge even if the regions are not
1614 * adjacent; otherwise only two adjacent regions will be merged
1615 * @throws IOException
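 * <p>A test-only sketch; {@code admin} is an open {@link HBaseAdmin} and "test_table"
 * is a placeholder for an existing table whose first two regions are adjacent:
 * <pre>{@code
 * List<RegionInfo> regions = admin.getRegions(TableName.valueOf("test_table"));
 * admin.mergeRegionsSync(regions.get(0).getEncodedNameAsBytes(),
 *   regions.get(1).getEncodedNameAsBytes(), false);
 * }</pre>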
1617 @VisibleForTesting
1618 public void mergeRegionsSync(
1619 final byte[] nameOfRegionA,
1620 final byte[] nameOfRegionB,
1621 final boolean forcible) throws IOException {
1622 get(
1623 mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible),
1624 syncWaitTimeout,
1625 TimeUnit.MILLISECONDS);
1629 * Merge two regions. Asynchronous operation.
1630 * @param nameOfRegionA encoded or full name of region a
1631 * @param nameOfRegionB encoded or full name of region b
1632 * @param forcible true to force the merge even if the regions are not
1633 * adjacent; otherwise only two adjacent regions will be merged
1634 * @throws IOException
1635 * @deprecated Since 2.0. Will be removed in 3.0. Use
1636 * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead.
1638 @Deprecated
1639 @Override
1640 public void mergeRegions(final byte[] nameOfRegionA,
1641 final byte[] nameOfRegionB, final boolean forcible)
1642 throws IOException {
1643 mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible);
1647 * Merge two regions. Asynchronous operation.
1648 * @param nameOfRegionA encoded or full name of region a
1649 * @param nameOfRegionB encoded or full name of region b
1650 * @param forcible true to force the merge even if the regions are not
1651 * adjacent; otherwise only two adjacent regions will be merged
1652 * @throws IOException
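 * <p>A usage sketch ("test_table" and the 60 second wait are placeholder choices,
 * not requirements of the API):
 * <pre>{@code
 * List<RegionInfo> regions = admin.getRegions(TableName.valueOf("test_table"));
 * Future<Void> f = admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
 *   regions.get(1).getEncodedNameAsBytes(), false);
 * f.get(60, TimeUnit.SECONDS); // block only if completion must be observed
 * }</pre>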
1654 @Override
1655 public Future<Void> mergeRegionsAsync(
1656 final byte[] nameOfRegionA,
1657 final byte[] nameOfRegionB,
1658 final boolean forcible) throws IOException {
1659 byte[][] nameofRegionsToMerge = new byte[2][];
1660 nameofRegionsToMerge[0] = nameOfRegionA;
1661 nameofRegionsToMerge[1] = nameOfRegionB;
1662 return mergeRegionsAsync(nameofRegionsToMerge, forcible);
1666 * Merge regions. Asynchronous operation.
1667 * @param nameofRegionsToMerge encoded or full names of the regions to merge
1668 * @param forcible true to force the merge even if the regions are not
1669 * adjacent; otherwise only adjacent regions will be merged
1670 * @throws IOException
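 * <p>The same merge expressed with the array form (placeholder table name):
 * <pre>{@code
 * List<RegionInfo> regions = admin.getRegions(TableName.valueOf("test_table"));
 * byte[][] toMerge = new byte[][] { regions.get(0).getEncodedNameAsBytes(),
 *   regions.get(1).getEncodedNameAsBytes() };
 * admin.mergeRegionsAsync(toMerge, false).get();
 * }</pre>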
1672 @Override
1673 public Future<Void> mergeRegionsAsync(
1674 final byte[][] nameofRegionsToMerge,
1675 final boolean forcible) throws IOException {
1676 assert(nameofRegionsToMerge.length >= 2);
1677 byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
1678 for(int i = 0; i < nameofRegionsToMerge.length; i++) {
1679 encodedNameofRegionsToMerge[i] = HRegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ?
1680 nameofRegionsToMerge[i] : HRegionInfo.encodeRegionName(nameofRegionsToMerge[i])
1681 .getBytes(StandardCharsets.UTF_8);
1684 TableName tableName = null;
1685 Pair<RegionInfo, ServerName> pair;
1687 for(int i = 0; i < nameofRegionsToMerge.length; i++) {
1688 pair = getRegion(nameofRegionsToMerge[i]);
1690 if (pair != null) {
1691 if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1692 throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly");
1694 if (tableName == null) {
1695 tableName = pair.getFirst().getTable();
1696 } else if (!tableName.equals(pair.getFirst().getTable())) {
1697 throw new IllegalArgumentException ("Cannot merge regions from two different tables " +
1698 tableName + " and " + pair.getFirst().getTable());
1700 } else {
1701 throw new UnknownRegionException (
1702 "Can't invoke merge on unknown region "
1703 + Bytes.toStringBinary(encodedNameofRegionsToMerge[i]));
1707 MergeTableRegionsResponse response =
1708 executeCallable(new MasterCallable<MergeTableRegionsResponse>(getConnection(),
1709 getRpcControllerFactory()) {
1710 @Override
1711 protected MergeTableRegionsResponse rpcCall() throws Exception {
1712 MergeTableRegionsRequest request = RequestConverter
1713 .buildMergeTableRegionsRequest(
1714 encodedNameofRegionsToMerge,
1715 forcible,
1716 ng.getNonceGroup(),
1717 ng.newNonce());
1718 return master.mergeTableRegions(getRpcController(), request);
1721 return new MergeTableRegionsFuture(this, tableName, response);
1724 private static class MergeTableRegionsFuture extends TableFuture<Void> {
1725 public MergeTableRegionsFuture(
1726 final HBaseAdmin admin,
1727 final TableName tableName,
1728 final MergeTableRegionsResponse response) {
1729 super(admin, tableName,
1730 (response != null && response.hasProcId()) ? response.getProcId() : null);
1733 public MergeTableRegionsFuture(
1734 final HBaseAdmin admin,
1735 final TableName tableName,
1736 final Long procId) {
1737 super(admin, tableName, procId);
1740 @Override
1741 public String getOperationType() {
1742 return "MERGE_REGIONS";
1746 * Split one region. Synchronous operation.
1747 * Note: It is not feasible to predict how long a split will take.
1748 * Therefore, this is for internal testing only.
1749 * @param regionName encoded or full name of region
1750 * @param splitPoint key where region splits
1751 * @throws IOException
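 * <p>A test-only sketch; the table name and split key are placeholders:
 * <pre>{@code
 * RegionInfo region = admin.getRegions(TableName.valueOf("test_table")).get(0);
 * admin.splitRegionSync(region.getRegionName(), Bytes.toBytes("row-0500"));
 * }</pre>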
1753 @VisibleForTesting
1754 public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException {
1755 splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS);
1760 * Split one region. Synchronous operation.
1761 * @param regionName region to be split
1762 * @param splitPoint split point
1763 * @param timeout how long to wait on split
1764 * @param units time units
1765 * @throws IOException
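 * <p>For example, bounding the wait explicitly (placeholder values):
 * <pre>{@code
 * RegionInfo region = admin.getRegions(TableName.valueOf("test_table")).get(0);
 * admin.splitRegionSync(region.getRegionName(), Bytes.toBytes("row-0500"), 30, TimeUnit.SECONDS);
 * }</pre>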
1767 public void splitRegionSync(byte[] regionName, byte[] splitPoint,
1768 final long timeout, final TimeUnit units) throws IOException {
1769 get(
1770 splitRegionAsync(regionName, splitPoint),
1771 timeout,
1772 units);
1775 @Override
1776 public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
1777 throws IOException {
1778 byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ?
1779 regionName : HRegionInfo.encodeRegionName(regionName).getBytes(StandardCharsets.UTF_8);
1780 Pair<RegionInfo, ServerName> pair = getRegion(regionName);
1781 if (pair != null) {
1782 if (pair.getFirst() != null &&
1783 pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1784 throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
1786 } else {
1787 throw new UnknownRegionException(
1788 "Can't invoke split on unknown region "
1789 + Bytes.toStringBinary(encodedNameofRegionToSplit));
1792 return splitRegionAsync(pair.getFirst(), splitPoint);
1795 Future<Void> splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException {
1796 TableName tableName = hri.getTable();
1797 if (hri.getStartKey() != null && splitPoint != null &&
1798 Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
1799 throw new IOException("Should not provide a split key equal to the region start key");
1802 SplitTableRegionResponse response = executeCallable(
1803 new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
1804 @Override
1805 protected SplitTableRegionResponse rpcCall() throws Exception {
1806 setPriority(tableName);
1807 SplitTableRegionRequest request = RequestConverter
1808 .buildSplitTableRegionRequest(hri, splitPoint, ng.getNonceGroup(), ng.newNonce());
1809 return master.splitRegion(getRpcController(), request);
1812 return new SplitTableRegionFuture(this, tableName, response);
1815 private static class SplitTableRegionFuture extends TableFuture<Void> {
1816 public SplitTableRegionFuture(final HBaseAdmin admin,
1817 final TableName tableName,
1818 final SplitTableRegionResponse response) {
1819 super(admin, tableName,
1820 (response != null && response.hasProcId()) ? response.getProcId() : null);
1823 public SplitTableRegionFuture(
1824 final HBaseAdmin admin,
1825 final TableName tableName,
1826 final Long procId) {
1827 super(admin, tableName, procId);
1830 @Override
1831 public String getOperationType() {
1832 return "SPLIT_REGION";
1836 @Override
1837 public void split(final TableName tableName) throws IOException {
1838 split(tableName, null);
1841 @Override
1842 public void splitRegion(final byte[] regionName) throws IOException {
1843 splitRegion(regionName, null);
1846 @Override
1847 public void split(final TableName tableName, final byte[] splitPoint) throws IOException {
1848 checkTableExists(tableName);
1849 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
1850 ServerName sn = loc.getServerName();
1851 if (sn == null) {
1852 continue;
1854 RegionInfo r = loc.getRegion();
1855 // check for parents
1856 if (r.isSplitParent()) {
1857 continue;
1859 // if a split point given, only split that particular region
1860 if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID ||
1861 (splitPoint != null && !r.containsRow(splitPoint))) {
1862 continue;
1864 // call out to master to do split now
1865 splitRegionAsync(r, splitPoint);
1869 @Override
1870 public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException {
1871 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
1872 if (regionServerPair == null) {
1873 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
1875 if (regionServerPair.getFirst() != null &&
1876 regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1877 throw new IllegalArgumentException("Can't split replicas directly. "
1878 + "Replicas are auto-split when their primary is split.");
1880 if (regionServerPair.getSecond() == null) {
1881 throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1883 splitRegionAsync(regionServerPair.getFirst(), splitPoint);
1886 @Override
1887 public void modifyTable(final TableName tableName, final TableDescriptor td)
1888 throws IOException {
1889 get(modifyTableAsync(tableName, td), syncWaitTimeout, TimeUnit.MILLISECONDS);
1892 @Override
1893 public Future<Void> modifyTableAsync(final TableName tableName, final TableDescriptor td)
1894 throws IOException {
1895 if (!tableName.equals(td.getTableName())) {
1896 throw new IllegalArgumentException("the specified table name '" + tableName +
1897 "' doesn't match with the HTD one: " + td.getTableName());
1899 return modifyTableAsync(td);
1902 private static class ModifyTableFuture extends TableFuture<Void> {
1903 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName,
1904 final ModifyTableResponse response) {
1905 super(admin, tableName,
1906 (response != null && response.hasProcId()) ? response.getProcId() : null);
1909 public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
1910 super(admin, tableName, procId);
1913 @Override
1914 public String getOperationType() {
1915 return "MODIFY";
1918 @Override
1919 protected Void postOperationResult(final Void result, final long deadlineTs)
1920 throws IOException, TimeoutException {
1921 // The modify operation on the table is asynchronous on the server side irrespective
1922 // of whether Procedure V2 is supported or not. So, we wait in the client till
1923 // all regions get updated.
1924 waitForSchemaUpdate(deadlineTs);
1925 return result;
1930 * @param regionName Name of a region.
1931 * @return a pair of RegionInfo and ServerName if <code>regionName</code> is
1932 * a verified region name (we call {@link
1933 * MetaTableAccessor#getRegionLocation(Connection, byte[])}),
1934 * else null.
1935 * Throws IllegalArgumentException if <code>regionName</code> is null.
1936 * @throws IOException
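 * <p>Lookup semantics, sketched; {@code region} stands for a {@link RegionInfo} of an
 * existing table:
 * <pre>{@code
 * Pair<RegionInfo, ServerName> byFullName = getRegion(region.getRegionName());
 * Pair<RegionInfo, ServerName> byEncodedName = getRegion(region.getEncodedNameAsBytes());
 * // both resolve to the same region; the ServerName half may be null if unassigned
 * }</pre>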
1938 Pair<RegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
1939 if (regionName == null) {
1940 throw new IllegalArgumentException("Pass a table name or region name");
1942 Pair<RegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionName);
1943 if (pair == null) {
1944 final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);
1945 final String encodedName = Bytes.toString(regionName);
1946 MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
1947 @Override
1948 public boolean visit(Result data) throws IOException {
1949 RegionInfo info = MetaTableAccessor.getRegionInfo(data);
1950 if (info == null) {
1951 LOG.warn("No serialized HRegionInfo in " + data);
1952 return true;
1954 RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
1955 boolean matched = false;
1956 ServerName sn = null;
1957 if (rl != null) {
1958 for (HRegionLocation h : rl.getRegionLocations()) {
1959 if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
1960 sn = h.getServerName();
1961 info = h.getRegionInfo();
1962 matched = true;
1966 if (!matched) return true;
1967 result.set(new Pair<>(info, sn));
1968 return false; // found the region, stop
1972 MetaTableAccessor.fullScanRegions(connection, visitor);
1973 pair = result.get();
1975 return pair;
1979 * If the input is a region name, it is returned as is. If it's an
1980 * encoded region name, the corresponding region is found from meta
1981 * and its region name is returned. If we can't find any region in
1982 * meta matching the input as either region name or encoded region
1983 * name, the input is returned as is. We don't throw an unknown
1984 * region exception.
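 * <p>For example (sketch), an encoded name resolves back to the full region name when
 * the region is present in meta:
 * <pre>{@code
 * byte[] fullName = getRegionName(region.getEncodedNameAsBytes());
 * }</pre>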
1986 private byte[] getRegionName(
1987 final byte[] regionNameOrEncodedRegionName) throws IOException {
1988 if (Bytes.equals(regionNameOrEncodedRegionName,
1989 HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
1990 || Bytes.equals(regionNameOrEncodedRegionName,
1991 HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
1992 return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
1994 byte[] tmp = regionNameOrEncodedRegionName;
1995 Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
1996 if (regionServerPair != null && regionServerPair.getFirst() != null) {
1997 tmp = regionServerPair.getFirst().getRegionName();
1999 return tmp;
2003 * Check whether the table exists.
2004 * @param tableName Name of a table.
2005 * @return tableName instance
2006 * @throws IOException if a remote or network exception occurs.
2007 * @throws TableNotFoundException if table does not exist.
2009 private TableName checkTableExists(final TableName tableName)
2010 throws IOException {
2011 return executeCallable(new RpcRetryingCallable<TableName>() {
2012 @Override
2013 protected TableName rpcCall(int callTimeout) throws Exception {
2014 if (!MetaTableAccessor.tableExists(connection, tableName)) {
2015 throw new TableNotFoundException(tableName);
2017 return tableName;
2022 @Override
2023 public synchronized void shutdown() throws IOException {
2024 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2025 @Override
2026 protected Void rpcCall() throws Exception {
2027 setPriority(HConstants.HIGH_QOS);
2028 master.shutdown(getRpcController(), ShutdownRequest.newBuilder().build());
2029 return null;
2034 @Override
2035 public synchronized void stopMaster() throws IOException {
2036 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2037 @Override
2038 protected Void rpcCall() throws Exception {
2039 setPriority(HConstants.HIGH_QOS);
2040 master.stopMaster(getRpcController(), StopMasterRequest.newBuilder().build());
2041 return null;
2046 @Override
2047 public synchronized void stopRegionServer(final String hostnamePort)
2048 throws IOException {
2049 String hostname = Addressing.parseHostname(hostnamePort);
2050 int port = Addressing.parsePort(hostnamePort);
2051 final AdminService.BlockingInterface admin =
2052 this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2053 // TODO: There is no timeout on this controller. Set one!
2054 HBaseRpcController controller = rpcControllerFactory.newController();
2055 controller.setPriority(HConstants.HIGH_QOS);
2056 StopServerRequest request = RequestConverter.buildStopServerRequest(
2057 "Called by admin client " + this.connection.toString());
2058 try {
2059 admin.stopServer(controller, request);
2060 } catch (Exception e) {
2061 throw ProtobufUtil.handleRemoteException(e);
2065 @Override
2066 public boolean isMasterInMaintenanceMode() throws IOException {
2067 return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection(),
2068 this.rpcControllerFactory) {
2069 @Override
2070 protected IsInMaintenanceModeResponse rpcCall() throws Exception {
2071 return master.isMasterInMaintenanceMode(getRpcController(),
2072 IsInMaintenanceModeRequest.newBuilder().build());
2074 }).getInMaintenanceMode();
2077 @Override
2078 public ClusterStatus getClusterStatus() throws IOException {
2079 return getClusterStatus(EnumSet.allOf(Option.class));
2082 @Override
2083 public ClusterStatus getClusterStatus(EnumSet<Option> options) throws IOException {
2084 return executeCallable(new MasterCallable<ClusterStatus>(getConnection(),
2085 this.rpcControllerFactory) {
2086 @Override
2087 protected ClusterStatus rpcCall() throws Exception {
2088 GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest(options);
2089 return ProtobufUtil.convert(
2090 master.getClusterStatus(getRpcController(), req).getClusterStatus());
2095 @Override
2096 public List<RegionLoad> getRegionLoads(ServerName serverName, TableName tableName)
2097 throws IOException {
2098 AdminService.BlockingInterface admin = this.connection.getAdmin(serverName);
2099 HBaseRpcController controller = rpcControllerFactory.newController();
2100 return ProtobufUtil.getRegionLoad(controller, admin, tableName);
2103 @Override
2104 public Configuration getConfiguration() {
2105 return this.conf;
2109 * Do a get with a timeout against the passed in <code>future</code>.
2111 private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
2112 throws IOException {
2113 try {
2114 // TODO: how long should we wait? Spin forever?
2115 return future.get(timeout, units);
2116 } catch (InterruptedException e) {
2117 throw new InterruptedIOException("Interrupt while waiting on " + future);
2118 } catch (TimeoutException e) {
2119 throw new TimeoutIOException(e);
2120 } catch (ExecutionException e) {
2121 if (e.getCause() instanceof IOException) {
2122 throw (IOException)e.getCause();
2123 } else {
2124 throw new IOException(e.getCause());
2129 @Override
2130 public void createNamespace(final NamespaceDescriptor descriptor)
2131 throws IOException {
2132 get(createNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
2135 @Override
2136 public Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor)
2137 throws IOException {
2138 CreateNamespaceResponse response =
2139 executeCallable(new MasterCallable<CreateNamespaceResponse>(getConnection(),
2140 getRpcControllerFactory()) {
2141 @Override
2142 protected CreateNamespaceResponse rpcCall() throws Exception {
2143 return master.createNamespace(getRpcController(),
2144 CreateNamespaceRequest.newBuilder().setNamespaceDescriptor(ProtobufUtil.
2145 toProtoNamespaceDescriptor(descriptor)).build());
2148 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
2149 @Override
2150 public String getOperationType() {
2151 return "CREATE_NAMESPACE";
2156 @Override
2157 public void modifyNamespace(final NamespaceDescriptor descriptor)
2158 throws IOException {
2159 get(modifyNamespaceAsync(descriptor), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
2162 @Override
2163 public Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor)
2164 throws IOException {
2165 ModifyNamespaceResponse response =
2166 executeCallable(new MasterCallable<ModifyNamespaceResponse>(getConnection(),
2167 getRpcControllerFactory()) {
2168 @Override
2169 protected ModifyNamespaceResponse rpcCall() throws Exception {
2170 // TODO: set priority based on NS?
2171 return master.modifyNamespace(getRpcController(), ModifyNamespaceRequest.newBuilder().
2172 setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2175 return new NamespaceFuture(this, descriptor.getName(), response.getProcId()) {
2176 @Override
2177 public String getOperationType() {
2178 return "MODIFY_NAMESPACE";
2183 @Override
2184 public void deleteNamespace(final String name)
2185 throws IOException {
2186 get(deleteNamespaceAsync(name), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
2189 @Override
2190 public Future<Void> deleteNamespaceAsync(final String name)
2191 throws IOException {
2192 DeleteNamespaceResponse response =
2193 executeCallable(new MasterCallable<DeleteNamespaceResponse>(getConnection(),
2194 getRpcControllerFactory()) {
2195 @Override
2196 protected DeleteNamespaceResponse rpcCall() throws Exception {
2197 // TODO: set priority based on NS?
2198 return master.deleteNamespace(getRpcController(), DeleteNamespaceRequest.newBuilder().
2199 setNamespaceName(name).build());
2202 return new NamespaceFuture(this, name, response.getProcId()) {
2203 @Override
2204 public String getOperationType() {
2205 return "DELETE_NAMESPACE";
2210 @Override
2211 public NamespaceDescriptor getNamespaceDescriptor(final String name)
2212 throws NamespaceNotFoundException, IOException {
2213 return executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection(),
2214 getRpcControllerFactory()) {
2215 @Override
2216 protected NamespaceDescriptor rpcCall() throws Exception {
2217 return ProtobufUtil.toNamespaceDescriptor(
2218 master.getNamespaceDescriptor(getRpcController(),
2219 GetNamespaceDescriptorRequest.newBuilder().
2220 setNamespaceName(name).build()).getNamespaceDescriptor());
2225 @Override
2226 public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
2227 return executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection(),
2228 getRpcControllerFactory()) {
2229 @Override
2230 protected NamespaceDescriptor[] rpcCall() throws Exception {
2231 List<HBaseProtos.NamespaceDescriptor> list =
2232 master.listNamespaceDescriptors(getRpcController(),
2233 ListNamespaceDescriptorsRequest.newBuilder().build()).getNamespaceDescriptorList();
2234 NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
2235 for (int i = 0; i < list.size(); i++) {
2236 res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
2238 return res;
2243 @Override
2244 public String getProcedures() throws IOException {
2245 return executeCallable(new MasterCallable<String>(getConnection(),
2246 getRpcControllerFactory()) {
2247 @Override
2248 protected String rpcCall() throws Exception {
2249 GetProceduresRequest request = GetProceduresRequest.newBuilder().build();
2250 GetProceduresResponse response = master.getProcedures(getRpcController(), request);
2251 return ProtobufUtil.toProcedureJson(response.getProcedureList());
2256 @Override
2257 public String getLocks() throws IOException {
2258 return executeCallable(new MasterCallable<String>(getConnection(),
2259 getRpcControllerFactory()) {
2260 @Override
2261 protected String rpcCall() throws Exception {
2262 GetLocksRequest request = GetLocksRequest.newBuilder().build();
2263 GetLocksResponse response = master.getLocks(getRpcController(), request);
2264 return ProtobufUtil.toLockJson(response.getLockList());
2269 @Override
2270 public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
2271 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(),
2272 getRpcControllerFactory()) {
2273 @Override
2274 protected HTableDescriptor[] rpcCall() throws Exception {
2275 List<TableSchema> list =
2276 master.listTableDescriptorsByNamespace(getRpcController(),
2277 ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name)
2278 .build()).getTableSchemaList();
2279 HTableDescriptor[] res = new HTableDescriptor[list.size()];
2280 for (int i = 0; i < list.size(); i++) {
2281 res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i)));
2283 return res;
2288 @Override
2289 public TableName[] listTableNamesByNamespace(final String name) throws IOException {
2290 return executeCallable(new MasterCallable<TableName[]>(getConnection(),
2291 getRpcControllerFactory()) {
2292 @Override
2293 protected TableName[] rpcCall() throws Exception {
2294 List<HBaseProtos.TableName> tableNames =
2295 master.listTableNamesByNamespace(getRpcController(), ListTableNamesByNamespaceRequest.
2296 newBuilder().setNamespaceName(name).build())
2297 .getTableNameList();
2298 TableName[] result = new TableName[tableNames.size()];
2299 for (int i = 0; i < tableNames.size(); i++) {
2300 result[i] = ProtobufUtil.toTableName(tableNames.get(i));
2302 return result;
2308 * Is HBase available? Throw an exception if not.
2309 * @param conf system configuration
2310 * @throws MasterNotRunningException if the master is not running.
2311 * @throws ZooKeeperConnectionException if unable to connect to zookeeper. // TODO do not expose
2312 * ZKConnectionException.
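 * <p>A minimal usage sketch:
 * <pre>{@code
 * try {
 *   HBaseAdmin.available(HBaseConfiguration.create());
 * } catch (IOException e) {
 *   // covers MasterNotRunningException and ZooKeeperConnectionException as well
 * }
 * }</pre>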
2314 public static void available(final Configuration conf)
2315 throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2316 Configuration copyOfConf = HBaseConfiguration.create(conf);
2317 // We set it to make it fail as soon as possible if HBase is not available
2318 copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
2319 copyOfConf.setInt("zookeeper.recovery.retry", 0);
2321 // Check ZK first.
2322 // If the connection exists, we may have a connection to ZK that does not work anymore
2323 try (ClusterConnection connection =
2324 (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) {
2325 // can throw MasterNotRunningException
2326 connection.isMasterRunning();
2332 * @param tableName the name of the table
2333 * @return List of {@link HRegionInfo}.
2334 * @throws IOException
2335 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
2336 * Use {@link #getRegions(TableName)}.
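 * <p>Replacement sketch ("test_table" is a placeholder):
 * <pre>{@code
 * List<RegionInfo> regions = admin.getRegions(TableName.valueOf("test_table"));
 * }</pre>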
2338 @Deprecated
2339 @Override
2340 public List<HRegionInfo> getTableRegions(final TableName tableName)
2341 throws IOException {
2342 return getRegions(tableName).stream()
2343 .map(ImmutableHRegionInfo::new)
2344 .collect(Collectors.toList());
2347 @Override
2348 public synchronized void close() throws IOException {
2351 @Override
2352 public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
2353 throws IOException {
2354 return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(),
2355 getRpcControllerFactory()) {
2356 @Override
2357 protected HTableDescriptor[] rpcCall() throws Exception {
2358 GetTableDescriptorsRequest req =
2359 RequestConverter.buildGetTableDescriptorsRequest(tableNames);
2360 return ProtobufUtil
2361 .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream()
2362 .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
2367 @Override
2368 public HTableDescriptor[] getTableDescriptors(List<String> names)
2369 throws IOException {
2370 List<TableName> tableNames = new ArrayList<>(names.size());
2371 for(String name : names) {
2372 tableNames.add(TableName.valueOf(name));
2374 return getTableDescriptorsByTableName(tableNames);
2377 private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
2378 FailedLogCloseException {
2379 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2380 RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
2381 // TODO: There is no timeout on this controller. Set one!
2382 HBaseRpcController controller = rpcControllerFactory.newController();
2383 try {
2384 return admin.rollWALWriter(controller, request);
2385 } catch (ServiceException e) {
2386 throw ProtobufUtil.handleRemoteException(e);
2391 * Roll the log writer. I.e. when using a file system based write ahead log,
2392 * start writing log messages to a new file.
2394 * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
2395 * This method will return as soon as the roll is requested and the return value will
2396 * always be null. Additionally, the named region server may schedule store flushes at the
2397 * request of the wal handling the roll request.
2399 * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
2400 * return value may be either null or a list of encoded region names.
2402 * @param serverName
2403 * The servername of the regionserver. A server name is made of host,
2404 * port and startcode. This is mandatory. Here is an example:
2405 * <code> host187.example.com,60020,1289493121758</code>
2406 * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
2407 * clean up some underlying files. null if there's nothing to flush.
2408 * @throws IOException if a remote or network exception occurs
2409 * @throws FailedLogCloseException
2410 * @deprecated use {@link #rollWALWriter(ServerName)}
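 * <p>Preferred form, reusing the example server name above:
 * <pre>{@code
 * admin.rollWALWriter(ServerName.valueOf("host187.example.com,60020,1289493121758"));
 * }</pre>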
2412 @Deprecated
2413 public synchronized byte[][] rollHLogWriter(String serverName)
2414 throws IOException, FailedLogCloseException {
2415 ServerName sn = ServerName.valueOf(serverName);
2416 final RollWALWriterResponse response = rollWALWriterImpl(sn);
2417 int regionCount = response.getRegionToFlushCount();
2418 if (0 == regionCount) {
2419 return null;
2421 byte[][] regionsToFlush = new byte[regionCount][];
2422 for (int i = 0; i < regionCount; i++) {
2423 regionsToFlush[i] = ProtobufUtil.toBytes(response.getRegionToFlush(i));
2425 return regionsToFlush;
2428 @Override
2429 public synchronized void rollWALWriter(ServerName serverName)
2430 throws IOException, FailedLogCloseException {
2431 rollWALWriterImpl(serverName);
2434 @Override
2435 public String[] getMasterCoprocessors() {
2436 try {
2437 return getClusterStatus(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessors();
2438 } catch (IOException e) {
2439 LOG.error("Could not getClusterStatus()",e);
2440 return null;
2444 @Override
2445 public CompactionState getCompactionState(final TableName tableName)
2446 throws IOException {
2447 return getCompactionState(tableName, CompactType.NORMAL);
2450 @Override
2451 public CompactionState getCompactionStateForRegion(final byte[] regionName)
2452 throws IOException {
2453 final Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
2454 if (regionServerPair == null) {
2455 throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2457 if (regionServerPair.getSecond() == null) {
2458 throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2460 ServerName sn = regionServerPair.getSecond();
2461 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2462 // TODO: There is no timeout on this controller. Set one!
2463 HBaseRpcController controller = rpcControllerFactory.newController();
2464 GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
2465 regionServerPair.getFirst().getRegionName(), true);
2466 GetRegionInfoResponse response;
2467 try {
2468 response = admin.getRegionInfo(controller, request);
2469 } catch (ServiceException e) {
2470 throw ProtobufUtil.handleRemoteException(e);
2472 if (response.getCompactionState() != null) {
2473 return ProtobufUtil.createCompactionState(response.getCompactionState());
2475 return null;
2478 @Override
2479 public void snapshot(final String snapshotName,
2480 final TableName tableName) throws IOException,
2481 SnapshotCreationException, IllegalArgumentException {
2482 snapshot(snapshotName, tableName, SnapshotType.FLUSH);
2485 @Override
2486 public void snapshot(final byte[] snapshotName, final TableName tableName)
2487 throws IOException, SnapshotCreationException, IllegalArgumentException {
2488 snapshot(Bytes.toString(snapshotName), tableName, SnapshotType.FLUSH);
2491 @Override
2492 public void snapshot(final String snapshotName, final TableName tableName,
2493 SnapshotType type)
2494 throws IOException, SnapshotCreationException, IllegalArgumentException {
2495 snapshot(new SnapshotDescription(snapshotName, tableName, type));
2498 @Override
2499 public void snapshot(SnapshotDescription snapshotDesc)
2500 throws IOException, SnapshotCreationException, IllegalArgumentException {
2501 // actually take the snapshot
2502 SnapshotProtos.SnapshotDescription snapshot =
2503 ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
2504 SnapshotResponse response = asyncSnapshot(snapshot);
2505 final IsSnapshotDoneRequest request =
2506 IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
2507 IsSnapshotDoneResponse done = null;
2508 long start = EnvironmentEdgeManager.currentTime();
2509 long max = response.getExpectedTimeout();
2510 long maxPauseTime = max / this.numRetries;
2511 int tries = 0;
2512 LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
2513 ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
2514 maxPauseTime + " ms per retry)");
2515 while (tries == 0
2516 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
2517 try {
2518 // sleep a backoff <= pauseTime amount
2519 long sleep = getPauseTime(tries++);
2520 sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2521 LOG.debug("(#" + tries + ") Sleeping: " + sleep +
2522 "ms while waiting for snapshot completion.");
2523 Thread.sleep(sleep);
2524 } catch (InterruptedException e) {
2525 throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
2527 LOG.debug("Getting current status of snapshot from master...");
2528 done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(),
2529 getRpcControllerFactory()) {
2530 @Override
2531 protected IsSnapshotDoneResponse rpcCall() throws Exception {
2532 return master.isSnapshotDone(getRpcController(), request);
2536 if (!done.getDone()) {
2537 throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
2538 + "' wasn't completed in expectedTime:" + max + " ms", snapshotDesc);
2542 @Override
2543 public void snapshotAsync(SnapshotDescription snapshotDesc) throws IOException,
2544 SnapshotCreationException {
2545 asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc));
2548 private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot)
2549 throws IOException {
2550 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2551 final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
2552 .build();
2553 // run the snapshot on the master
2554 return executeCallable(new MasterCallable<SnapshotResponse>(getConnection(),
2555 getRpcControllerFactory()) {
2556 @Override
2557 protected SnapshotResponse rpcCall() throws Exception {
2558 return master.snapshot(getRpcController(), request);
2563 @Override
2564 public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc)
2565 throws IOException, HBaseSnapshotException, UnknownSnapshotException {
2566 final SnapshotProtos.SnapshotDescription snapshot =
2567 ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
2568 return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(),
2569 getRpcControllerFactory()) {
2570 @Override
2571 protected IsSnapshotDoneResponse rpcCall() throws Exception {
2572 return master.isSnapshotDone(getRpcController(),
2573 IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
2575 }).getDone();
2578 @Override
2579 public void restoreSnapshot(final byte[] snapshotName)
2580 throws IOException, RestoreSnapshotException {
2581 restoreSnapshot(Bytes.toString(snapshotName));
2584 @Override
2585 public void restoreSnapshot(final String snapshotName)
2586 throws IOException, RestoreSnapshotException {
2587 boolean takeFailSafeSnapshot =
2588 conf.getBoolean(HConstants.SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT,
2589 HConstants.DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT);
2590 restoreSnapshot(snapshotName, takeFailSafeSnapshot);
2593 @Override
2594 public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
2595 throws IOException, RestoreSnapshotException {
2596 restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
2600 * Check whether the snapshot exists and contains a disabled table.
2602 * @param snapshotName name of the snapshot to restore
2603 * @throws IOException if a remote or network exception occurs
2604 * @throws RestoreSnapshotException if no valid snapshot is found
2606 private TableName getTableNameBeforeRestoreSnapshot(final String snapshotName)
2607 throws IOException, RestoreSnapshotException {
2608 TableName tableName = null;
2609 for (SnapshotDescription snapshotInfo: listSnapshots()) {
2610 if (snapshotInfo.getName().equals(snapshotName)) {
2611 tableName = snapshotInfo.getTableName();
2612 break;
2616 if (tableName == null) {
2617 throw new RestoreSnapshotException(
2618 "Unable to find the table name for snapshot=" + snapshotName);
2620 return tableName;
2623 @Override
2624 public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
2625 throws IOException, RestoreSnapshotException {
2626 restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
2629 @Override
2630 public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
2631 final boolean restoreAcl) throws IOException, RestoreSnapshotException {
2632 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2634 // The table does not exist, so switch to clone.
2635 if (!tableExists(tableName)) {
2636 cloneSnapshot(snapshotName, tableName, restoreAcl);
2637 return;
2640 // Check if the table is disabled
2641 if (!isTableDisabled(tableName)) {
2642 throw new TableNotDisabledException(tableName);
2645 // Take a snapshot of the current state
2646 String failSafeSnapshotSnapshotName = null;
2647 if (takeFailSafeSnapshot) {
2648 failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
2649 "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
2650 failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
2651 .replace("{snapshot.name}", snapshotName)
2652 .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
2653 .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
2654 LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2655 snapshot(failSafeSnapshotSnapshotName, tableName);
2658 try {
2659 // Restore snapshot
2660 get(
2661 internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
2662 syncWaitTimeout,
2663 TimeUnit.MILLISECONDS);
2664 } catch (IOException e) {
2665 // Something went wrong during the restore...
2666 // if the pre-restore snapshot is available try to rollback
2667 if (takeFailSafeSnapshot) {
2668 try {
2669 get(
2670 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl),
2671 syncWaitTimeout,
2672 TimeUnit.MILLISECONDS);
2673 String msg = "Restore snapshot=" + snapshotName +
2674 " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
2675 LOG.error(msg, e);
2676 throw new RestoreSnapshotException(msg, e);
2677 } catch (IOException ex) {
2678 String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
2679 LOG.error(msg, ex);
2680 throw new RestoreSnapshotException(msg, e);
2682 } else {
2683 throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
2687 // If the restore is succeeded, delete the pre-restore snapshot
2688 if (takeFailSafeSnapshot) {
2689 try {
2690 LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
2691 deleteSnapshot(failSafeSnapshotSnapshotName);
2692 } catch (IOException e) {
2693 LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
2698 @Override
2699 public Future<Void> restoreSnapshotAsync(final String snapshotName)
2700 throws IOException, RestoreSnapshotException {
2701 TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
2703 // The table does not exist, so switch to clone.
2704 if (!tableExists(tableName)) {
2705 return cloneSnapshotAsync(snapshotName, tableName);
2708 // Check if the table is disabled
2709 if (!isTableDisabled(tableName)) {
2710 throw new TableNotDisabledException(tableName);
2713 return internalRestoreSnapshotAsync(snapshotName, tableName, false);
2716 @Override
2717 public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
2718 throws IOException, TableExistsException, RestoreSnapshotException {
2719 cloneSnapshot(Bytes.toString(snapshotName), tableName);
2722 @Override
2723 public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
2724 throws IOException, TableExistsException, RestoreSnapshotException {
2725 if (tableExists(tableName)) {
2726 throw new TableExistsException(tableName);
2728 get(
2729 internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
2730 Integer.MAX_VALUE,
2731 TimeUnit.MILLISECONDS);
2734 @Override
2735 public void cloneSnapshot(final String snapshotName, final TableName tableName)
2736 throws IOException, TableExistsException, RestoreSnapshotException {
2737 cloneSnapshot(snapshotName, tableName, false);
2740 @Override
2741 public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
2742 throws IOException, TableExistsException {
2743 if (tableExists(tableName)) {
2744 throw new TableExistsException(tableName);
2746 return internalRestoreSnapshotAsync(snapshotName, tableName, false);
2749 @Override
2750 public byte[] execProcedureWithReturn(String signature, String instance, Map<String,
2751 String> props) throws IOException {
2752 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2753 final ExecProcedureRequest request =
2754 ExecProcedureRequest.newBuilder().setProcedure(desc).build();
2755 // run the procedure on the master
2756 ExecProcedureResponse response = executeCallable(
2757 new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) {
2758 @Override
2759 protected ExecProcedureResponse rpcCall() throws Exception {
2760 return master.execProcedureWithRet(getRpcController(), request);
2764 return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
2767 @Override
2768 public void execProcedure(String signature, String instance, Map<String, String> props)
2769 throws IOException {
2770 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2771 final ExecProcedureRequest request =
2772 ExecProcedureRequest.newBuilder().setProcedure(desc).build();
2773 // run the procedure on the master
2774 ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
2775 getConnection(), getRpcControllerFactory()) {
2776 @Override
2777 protected ExecProcedureResponse rpcCall() throws Exception {
2778 return master.execProcedure(getRpcController(), request);
2782 long start = EnvironmentEdgeManager.currentTime();
2783 long max = response.getExpectedTimeout();
2784 long maxPauseTime = max / this.numRetries;
2785 int tries = 0;
2786 LOG.debug("Waiting a max of " + max + " ms for procedure '" +
2787 signature + " : " + instance + "' to complete. (max " + maxPauseTime + " ms per retry)");
2788 boolean done = false;
2789 while (tries == 0
2790 || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
2791 try {
2792 // sleep a backoff <= pauseTime amount
2793 long sleep = getPauseTime(tries++);
2794 sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
2795 LOG.debug("(#" + tries + ") Sleeping: " + sleep +
2796 "ms while waiting for procedure completion.");
2797 Thread.sleep(sleep);
2798 } catch (InterruptedException e) {
2799 throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
2801 LOG.debug("Getting current status of procedure from master...");
2802 done = isProcedureFinished(signature, instance, props);
2804 if (!done) {
2805 throw new IOException("Procedure '" + signature + " : " + instance
2806 + "' wasn't completed in expectedTime:" + max + " ms");
2810 @Override
2811 public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
2812 throws IOException {
2813 ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
2814 return executeCallable(
2815 new MasterCallable<IsProcedureDoneResponse>(getConnection(), getRpcControllerFactory()) {
2816 @Override
2817 protected IsProcedureDoneResponse rpcCall() throws Exception {
2818 return master.isProcedureDone(getRpcController(),
2819 IsProcedureDoneRequest.newBuilder().setProcedure(desc).build());
2821 }).getDone();
2825 * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
2826 * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
2827 * create an HTable instance for this table before it is available.
2828 * @param snapshotName snapshot to restore
2829 * @param tableName table name to restore the snapshot on
2830 * @throws IOException if a remote or network exception occurs
2831 * @throws RestoreSnapshotException if snapshot failed to be restored
2832 * @throws IllegalArgumentException if the restore request is formatted incorrectly
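 * <p>The public snapshot/restore entry points funnel into this call; a typical flow,
 * sketched with placeholder names and an open {@code Admin} handle:
 * <pre>{@code
 * TableName tn = TableName.valueOf("test_table");
 * admin.snapshot("snap1", tn);
 * admin.disableTable(tn);
 * admin.restoreSnapshot("snap1");
 * admin.enableTable(tn);
 * }</pre>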
2834 private Future<Void> internalRestoreSnapshotAsync(final String snapshotName,
2835 final TableName tableName, final boolean restoreAcl)
2836 throws IOException, RestoreSnapshotException {
2837 final SnapshotProtos.SnapshotDescription snapshot =
2838 SnapshotProtos.SnapshotDescription.newBuilder()
2839 .setName(snapshotName).setTable(tableName.getNameAsString()).build();
2841 // actually restore the snapshot
2842 ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
2844 RestoreSnapshotResponse response = executeCallable(
2845 new MasterCallable<RestoreSnapshotResponse>(getConnection(), getRpcControllerFactory()) {
2846 @Override
2847 protected RestoreSnapshotResponse rpcCall() throws Exception {
2848 final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder()
2849 .setSnapshot(snapshot)
2850 .setNonceGroup(ng.getNonceGroup())
2851 .setNonce(ng.newNonce())
2852 .setRestoreACL(restoreAcl)
2853 .build();
2854 return master.restoreSnapshot(getRpcController(), request);
2858 return new RestoreSnapshotFuture(this, snapshot, tableName, response);
2861 private static class RestoreSnapshotFuture extends TableFuture<Void> {
2862 public RestoreSnapshotFuture(
2863 final HBaseAdmin admin,
2864 final SnapshotProtos.SnapshotDescription snapshot,
2865 final TableName tableName,
2866 final RestoreSnapshotResponse response) {
2867 super(admin, tableName,
2868 (response != null && response.hasProcId()) ? response.getProcId() : null);
2870 if (response != null && !response.hasProcId()) {
2871 throw new UnsupportedOperationException("The connected server is an older version that does not return a procedure id for this operation");
2875 public RestoreSnapshotFuture(
2876 final HBaseAdmin admin,
2877 final TableName tableName,
2878 final Long procId) {
2879 super(admin, tableName, procId);
2882 @Override
2883 public String getOperationType() {
2884 return "MODIFY";
2888 @Override
2889 public List<SnapshotDescription> listSnapshots() throws IOException {
2890 return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection(),
2891 getRpcControllerFactory()) {
2892 @Override
2893 protected List<SnapshotDescription> rpcCall() throws Exception {
2894 List<SnapshotProtos.SnapshotDescription> snapshotsList = master
2895 .getCompletedSnapshots(getRpcController(),
2896 GetCompletedSnapshotsRequest.newBuilder().build())
2897 .getSnapshotsList();
2898 List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size());
2899 for (SnapshotProtos.SnapshotDescription snapshot : snapshotsList) {
2900 result.add(ProtobufUtil.createSnapshotDesc(snapshot));
2902 return result;
2907 @Override
2908 public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
2909 return listSnapshots(Pattern.compile(regex));
2912 @Override
2913 public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
2914 List<SnapshotDescription> matched = new LinkedList<>();
2915 List<SnapshotDescription> snapshots = listSnapshots();
2916 for (SnapshotDescription snapshot : snapshots) {
2917 if (pattern.matcher(snapshot.getName()).matches()) {
2918 matched.add(snapshot);
2921 return matched;
2924 @Override
2925 public List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
2926 String snapshotNameRegex) throws IOException {
2927 return listTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex));
2930 @Override
2931 public List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
2932 Pattern snapshotNamePattern) throws IOException {
2933 TableName[] tableNames = listTableNames(tableNamePattern);
2935 List<SnapshotDescription> tableSnapshots = new LinkedList<>();
2936 List<SnapshotDescription> snapshots = listSnapshots(snapshotNamePattern);
2938 List<TableName> listOfTableNames = Arrays.asList(tableNames);
2939 for (SnapshotDescription snapshot : snapshots) {
2940 if (listOfTableNames.contains(snapshot.getTableName())) {
2941 tableSnapshots.add(snapshot);
2944 return tableSnapshots;
2947 @Override
2948 public void deleteSnapshot(final byte[] snapshotName) throws IOException {
2949 deleteSnapshot(Bytes.toString(snapshotName));
2952 @Override
2953 public void deleteSnapshot(final String snapshotName) throws IOException {
2954 // make sure the snapshot name is legal (snapshot names follow table naming rules)
2955 TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
2956 // do the delete
2957 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2958 @Override
2959 protected Void rpcCall() throws Exception {
2960 master.deleteSnapshot(getRpcController(),
2961 DeleteSnapshotRequest.newBuilder().setSnapshot(
2962 SnapshotProtos.SnapshotDescription.newBuilder().setName(snapshotName).build())
2963 .build()
2965 return null;
2970 @Override
2971 public void deleteSnapshots(final String regex) throws IOException {
2972 deleteSnapshots(Pattern.compile(regex));
2975 @Override
2976 public void deleteSnapshots(final Pattern pattern) throws IOException {
2977 List<SnapshotDescription> snapshots = listSnapshots(pattern);
2978 for (final SnapshotDescription snapshot : snapshots) {
2979 try {
2980 internalDeleteSnapshot(snapshot);
2981 } catch (IOException ex) {
2982 LOG.info("Failed to delete snapshot " + snapshot.getName() + " for table "
2983 + snapshot.getTableNameAsString(), ex);
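// Note that the pattern variant above first resolves the pattern to concrete snapshots and then
// deletes them one at a time; a failure on one snapshot is only logged and the loop carries on.
// A hypothetical cleanup of old nightly snapshots would therefore be a single call:
//
//   admin.deleteSnapshots(Pattern.compile("nightly-2017.*"));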
2988 private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
2989 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
2990 @Override
2991 protected Void rpcCall() throws Exception {
2992 this.master.deleteSnapshot(getRpcController(), DeleteSnapshotRequest.newBuilder()
2993 .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build());
2994 return null;
2999 @Override
3000 public void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex)
3001 throws IOException {
3002 deleteTableSnapshots(Pattern.compile(tableNameRegex), Pattern.compile(snapshotNameRegex));
3005 @Override
3006 public void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
3007 throws IOException {
3008 List<SnapshotDescription> snapshots = listTableSnapshots(tableNamePattern, snapshotNamePattern);
3009 for (SnapshotDescription snapshot : snapshots) {
3010 try {
3011 internalDeleteSnapshot(snapshot);
3012 LOG.debug("Successfully deleted snapshot: " + snapshot.getName());
3013 } catch (IOException e) {
3014 LOG.error("Failed to delete snapshot: " + snapshot.getName(), e);
3019 @Override
3020 public void setQuota(final QuotaSettings quota) throws IOException {
3021 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3022 @Override
3023 protected Void rpcCall() throws Exception {
3024 this.master.setQuota(getRpcController(), QuotaSettings.buildSetQuotaRequestProto(quota));
3025 return null;
3030 @Override
3031 public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
3032 return QuotaRetriever.open(conf, filter);
3035 @Override
3036 public List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException {
3037 List<QuotaSettings> quotas = new LinkedList<>();
3038 try (QuotaRetriever retriever = QuotaRetriever.open(conf, filter)) {
3039 Iterator<QuotaSettings> iterator = retriever.iterator();
3040 while (iterator.hasNext()) {
3041 quotas.add(iterator.next());
3044 return quotas;
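// getQuota(QuotaFilter) materializes every matching setting into a list; callers that only need
// to stream the results can use the QuotaRetriever directly. A minimal sketch (the filter setup
// is illustrative and assumes QuotaFilter's fluent setters):
//
//   for (QuotaSettings settings : admin.getQuota(new QuotaFilter().setTableFilter("my_table"))) {
//     LOG.info("Quota: " + settings);
//   }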
3047 private <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable)
3048 throws IOException {
3049 return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout);
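// The static overload below is the common funnel for master RPCs in this class: it creates a
// fresh RpcRetryingCaller per invocation, runs the callable with the configured operation
// timeout, and always closes the callable in the finally block.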
3052 private static <C extends RetryingCallable<V> & Closeable, V> V executeCallable(C callable,
3053 RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout)
3054 throws IOException {
3055 RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller(rpcTimeout);
3056 try {
3057 return caller.callWithRetries(callable, operationTimeout);
3058 } finally {
3059 callable.close();
3063 @Override
3064 // Coprocessor Endpoint against the Master.
3065 public CoprocessorRpcChannel coprocessorService() {
3066 return new SyncCoprocessorRpcChannel() {
3067 @Override
3068 protected Message callExecService(final RpcController controller,
3069 final Descriptors.MethodDescriptor method, final Message request,
3070 final Message responsePrototype)
3071 throws IOException {
3072 if (LOG.isTraceEnabled()) {
3073 LOG.trace("Call: " + method.getName() + ", " + request.toString());
3075 // Try-with-resources so close gets called when we are done.
3076 try (MasterCallable<CoprocessorServiceResponse> callable =
3077 new MasterCallable<CoprocessorServiceResponse>(connection,
3078 connection.getRpcControllerFactory()) {
3079 @Override
3080 protected CoprocessorServiceResponse rpcCall() throws Exception {
3081 CoprocessorServiceRequest csr =
3082 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request);
3083 return this.master.execMasterService(getRpcController(), csr);
3085 };) {
3086 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
3087 callable.prepare(false);
3088 int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
3089 CoprocessorServiceResponse result = callable.call(operationTimeout);
3090 return CoprocessorRpcUtils.getResponse(result, responsePrototype);
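// The returned channel is what generated coprocessor service stubs are bound to. A hypothetical
// master endpoint (names below are invented for illustration) would be wired up roughly like:
//
//   CoprocessorRpcChannel channel = admin.coprocessorService();
//   MyMasterService.BlockingInterface stub = MyMasterService.newBlockingStub(channel);
//   MyResponse resp = stub.myCall(null, MyRequest.getDefaultInstance());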
3097 * Simple {@link Abortable}, throwing RuntimeException on abort.
3099 private static class ThrowableAbortable implements Abortable {
3100 @Override
3101 public void abort(String why, Throwable e) {
3102 throw new RuntimeException(why, e);
3105 @Override
3106 public boolean isAborted() {
3107 return true;
3111 @Override
3112 public CoprocessorRpcChannel coprocessorService(final ServerName serverName) {
3113 return new SyncCoprocessorRpcChannel() {
3114 @Override
3115 protected Message callExecService(RpcController controller,
3116 Descriptors.MethodDescriptor method, Message request, Message responsePrototype)
3117 throws IOException {
3118 if (LOG.isTraceEnabled()) {
3119 LOG.trace("Call: " + method.getName() + ", " + request.toString());
3121 CoprocessorServiceRequest csr =
3122 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request);
3123 // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
3124 // TODO: Make this the same as RegionCoprocessorRpcChannel and MasterCoprocessorRpcChannel. They
3125 // are all different though they should do the same thing, e.g. RpcChannel setup.
3126 ClientProtos.ClientService.BlockingInterface stub = connection.getClient(serverName);
3127 CoprocessorServiceResponse result;
3128 try {
3129 result = stub.
3130 execRegionServerService(connection.getRpcControllerFactory().newController(), csr);
3131 return CoprocessorRpcUtils.getResponse(result, responsePrototype);
3132 } catch (ServiceException e) {
3133 throw ProtobufUtil.handleRemoteException(e);
3139 @Override
3140 public void updateConfiguration(final ServerName server) throws IOException {
3141 final AdminService.BlockingInterface admin = this.connection.getAdmin(server);
3142 Callable<Void> callable = new Callable<Void>() {
3143 @Override
3144 public Void call() throws Exception {
3145 admin.updateConfiguration(null, UpdateConfigurationRequest.getDefaultInstance());
3146 return null;
3149 ProtobufUtil.call(callable);
3152 @Override
3153 public void updateConfiguration() throws IOException {
3154 ClusterStatus status = getClusterStatus(
3155 EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS));
3156 for (ServerName server : status.getServers()) {
3157 updateConfiguration(server);
3160 updateConfiguration(status.getMaster());
3162 for (ServerName server : status.getBackupMasters()) {
3163 updateConfiguration(server);
3167 @Override
3168 public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
3169 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) {
3170 @Override
3171 protected Long rpcCall() throws Exception {
3172 MajorCompactionTimestampRequest req =
3173 MajorCompactionTimestampRequest.newBuilder()
3174 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
3175 return master.getLastMajorCompactionTimestamp(getRpcController(), req).
3176 getCompactionTimestamp();
3181 @Override
3182 public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
3183 return executeCallable(new MasterCallable<Long>(getConnection(), getRpcControllerFactory()) {
3184 @Override
3185 protected Long rpcCall() throws Exception {
3186 MajorCompactionTimestampForRegionRequest req =
3187 MajorCompactionTimestampForRegionRequest.newBuilder().setRegion(RequestConverter
3188 .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
3189 return master.getLastMajorCompactionTimestampForRegion(getRpcController(), req)
3190 .getCompactionTimestamp();
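// A common (illustrative) use of these timestamps is deciding whether a table is due for another
// major compaction, e.g. with a seven day threshold:
//
//   long lastMajor = admin.getLastMajorCompactionTimestamp(tableName);
//   if (EnvironmentEdgeManager.currentTime() - lastMajor > TimeUnit.DAYS.toMillis(7)) {
//     admin.majorCompact(tableName);
//   }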
3196 * {@inheritDoc}
3198 @Override
3199 public void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType)
3200 throws IOException, InterruptedException {
3201 compact(tableName, columnFamily, false, compactType);
3205 * {@inheritDoc}
3207 @Override
3208 public void compact(final TableName tableName, CompactType compactType)
3209 throws IOException, InterruptedException {
3210 compact(tableName, null, false, compactType);
3214 * {@inheritDoc}
3216 @Override
3217 public void majorCompact(final TableName tableName, final byte[] columnFamily,
3218 CompactType compactType) throws IOException, InterruptedException {
3219 compact(tableName, columnFamily, true, compactType);
3223 * {@inheritDoc}
3225 @Override
3226 public void majorCompact(final TableName tableName, CompactType compactType)
3227 throws IOException, InterruptedException {
3228 compact(tableName, null, true, compactType);
3232 * {@inheritDoc}
3234 @Override
3235 public CompactionState getCompactionState(final TableName tableName, CompactType compactType)
3236 throws IOException {
3237 AdminProtos.GetRegionInfoResponse.CompactionState state =
3238 AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
3239 checkTableExists(tableName);
3240 // TODO: There is no timeout on this controller. Set one!
3241 HBaseRpcController rpcController = rpcControllerFactory.newController();
3242 switch (compactType) {
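// MOB compaction state is asked of the master (via its admin interface) for the synthetic mob
// region of the table; NORMAL compaction state is collected per region from each hosting
// regionserver and the per-region states are merged below.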
3243 case MOB:
3244 final AdminProtos.AdminService.BlockingInterface masterAdmin =
3245 this.connection.getAdminForMaster();
3246 Callable<AdminProtos.GetRegionInfoResponse.CompactionState> callable =
3247 new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
3248 @Override
3249 public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
3250 RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
3251 GetRegionInfoRequest request =
3252 RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
3253 GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
3254 return response.getCompactionState();
3257 state = ProtobufUtil.call(callable);
3258 break;
3259 case NORMAL:
3260 for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
3261 ServerName sn = loc.getServerName();
3262 if (sn == null) {
3263 continue;
3265 byte[] regionName = loc.getRegion().getRegionName();
3266 AdminService.BlockingInterface snAdmin = this.connection.getAdmin(sn);
3267 try {
3268 Callable<GetRegionInfoResponse> regionInfoCallable =
3269 new Callable<GetRegionInfoResponse>() {
3270 @Override
3271 public GetRegionInfoResponse call() throws Exception {
3272 GetRegionInfoRequest request =
3273 RequestConverter.buildGetRegionInfoRequest(regionName, true);
3274 return snAdmin.getRegionInfo(rpcController, request);
3277 GetRegionInfoResponse response = ProtobufUtil.call(regionInfoCallable);
3278 switch (response.getCompactionState()) {
3279 case MAJOR_AND_MINOR:
3280 return CompactionState.MAJOR_AND_MINOR;
3281 case MAJOR:
3282 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) {
3283 return CompactionState.MAJOR_AND_MINOR;
3285 state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR;
3286 break;
3287 case MINOR:
3288 if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) {
3289 return CompactionState.MAJOR_AND_MINOR;
3291 state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR;
3292 break;
3293 case NONE:
3294 default: // nothing, continue
3296 } catch (NotServingRegionException e) {
3297 if (LOG.isDebugEnabled()) {
3298 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " +
3299 StringUtils.stringifyException(e));
3301 } catch (RemoteException e) {
3302 if (e.getMessage().contains(NotServingRegionException.class.getName())) {
3303 if (LOG.isDebugEnabled()) {
3304 LOG.debug("Trying to get compaction state of " + loc.getRegion() + ": " +
3305 StringUtils.stringifyException(e));
3307 } else {
3308 throw e;
3312 break;
3313 default:
3314 throw new IllegalArgumentException("Unknown compactType: " + compactType);
3316 if (state != null) {
3317 return ProtobufUtil.createCompactionState(state);
3319 return null;
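// Callers typically poll this after requesting a compaction; a rough, illustrative sketch
// (exception handling omitted):
//
//   admin.majorCompact(tableName, CompactType.NORMAL);
//   while (admin.getCompactionState(tableName, CompactType.NORMAL) != CompactionState.NONE) {
//     Thread.sleep(1000);
//   }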
3323 * Future that waits on a procedure result.
3324 * Returned by the async version of the Admin calls,
3325 * and used internally by the sync calls to wait on the result of the procedure.
3327 @InterfaceAudience.Private
3328 @InterfaceStability.Evolving
3329 protected static class ProcedureFuture<V> implements Future<V> {
3330 private ExecutionException exception = null;
3331 private boolean procResultFound = false;
3332 private boolean done = false;
3333 private boolean cancelled = false;
3334 private V result = null;
3336 private final HBaseAdmin admin;
3337 private final Long procId;
3339 public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
3340 this.admin = admin;
3341 this.procId = procId;
3344 @Override
3345 public boolean cancel(boolean mayInterruptIfRunning) {
3346 AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
3347 .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
3348 try {
3349 cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
3350 if (cancelled) {
3351 done = true;
3353 } catch (IOException e) {
3354 // Cancel threw an exception for some reason. At this point we are not sure whether
3355 // the cancel succeeded or failed. We assume it failed, but print out a warning
3356 // for debugging purposes.
3357 LOG.warn(
3358 "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(),
3360 cancelled = false;
3362 return cancelled;
3365 @Override
3366 public boolean isCancelled() {
3367 return cancelled;
3370 protected AbortProcedureResponse abortProcedureResult(
3371 final AbortProcedureRequest request) throws IOException {
3372 return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
3373 admin.getConnection(), admin.getRpcControllerFactory()) {
3374 @Override
3375 protected AbortProcedureResponse rpcCall() throws Exception {
3376 return master.abortProcedure(getRpcController(), request);
3381 @Override
3382 public V get() throws InterruptedException, ExecutionException {
3383 // TODO: should we ever spin forever?
3384 throw new UnsupportedOperationException();
3387 @Override
3388 public V get(long timeout, TimeUnit unit)
3389 throws InterruptedException, ExecutionException, TimeoutException {
3390 if (!done) {
3391 long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
3392 try {
3393 try {
3394 // if the master supports procedures, try to wait for the result
3395 if (procId != null) {
3396 result = waitProcedureResult(procId, deadlineTs);
3398 // if we don't have a proc result, try the compatibility wait
3399 if (!procResultFound) {
3400 result = waitOperationResult(deadlineTs);
3402 result = postOperationResult(result, deadlineTs);
3403 done = true;
3404 } catch (IOException e) {
3405 result = postOperationFailure(e, deadlineTs);
3406 done = true;
3408 } catch (IOException e) {
3409 exception = new ExecutionException(e);
3410 done = true;
3413 if (exception != null) {
3414 throw exception;
3416 return result;
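// In short: get(timeout, unit) first tries the procedure-v2 result via waitProcedureResult(); if
// no procedure result is available it falls back to waitOperationResult(), then gives
// postOperationResult()/postOperationFailure() a chance to transform the outcome, and finally
// rethrows any captured failure as an ExecutionException.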
3419 @Override
3420 public boolean isDone() {
3421 return done;
3424 protected HBaseAdmin getAdmin() {
3425 return admin;
3428 private V waitProcedureResult(long procId, long deadlineTs)
3429 throws IOException, TimeoutException, InterruptedException {
3430 GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
3431 .setProcId(procId)
3432 .build();
3434 int tries = 0;
3435 IOException serviceEx = null;
3436 while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3437 GetProcedureResultResponse response = null;
3438 try {
3439 // Try to fetch the result
3440 response = getProcedureResult(request);
3441 } catch (IOException e) {
3442 serviceEx = unwrapException(e);
3444 // the master may be down
3445 LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
3447 // Not much to do if we have a DoNotRetryIOException
3448 if (serviceEx instanceof DoNotRetryIOException) {
3449 // TODO: looks like there is no way to unwrap this exception and get the proper
3450 // UnsupportedOperationException aside from looking at the message.
3451 // Anyway, if we fail here we just fail over to the compatibility side,
3452 // and that is always a valid solution.
3453 LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
3454 procResultFound = false;
3455 return null;
3459 // If the procedure is no longer running, we should have a result
3460 if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
3461 procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
3462 return convertResult(response);
3465 try {
3466 Thread.sleep(getAdmin().getPauseTime(tries++));
3467 } catch (InterruptedException e) {
3468 throw new InterruptedException(
3469 "Interrupted while waiting for the result of proc " + procId);
3472 if (serviceEx != null) {
3473 throw serviceEx;
3474 } else {
3475 throw new TimeoutException("The procedure " + procId + " is still running");
3479 private static IOException unwrapException(IOException e) {
3480 if (e instanceof RemoteException) {
3481 return ((RemoteException)e).unwrapRemoteException();
3483 return e;
3486 protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
3487 throws IOException {
3488 return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
3489 admin.getConnection(), admin.getRpcControllerFactory()) {
3490 @Override
3491 protected GetProcedureResultResponse rpcCall() throws Exception {
3492 return master.getProcedureResult(getRpcController(), request);
3498 * Convert the procedure result response to a specified type.
3499 * @param response the procedure result object to parse
3500 * @return the result data of the procedure.
3502 protected V convertResult(final GetProcedureResultResponse response) throws IOException {
3503 if (response.hasException()) {
3504 throw ForeignExceptionUtil.toIOException(response.getException());
3506 return null;
3510 * Fallback implementation in case the procedure is not supported by the server.
3511 * It should try to wait until the operation is completed.
3512 * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3513 * @return the result data of the operation
3515 protected V waitOperationResult(final long deadlineTs)
3516 throws IOException, TimeoutException {
3517 return null;
3521 * Called after the operation is completed and the result fetched. This allows extra steps
3522 * to be performed after the procedure completes, and transformations to be applied to the
3523 * result that will be returned by get().
3524 * @param result the result of the procedure
3525 * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3526 * @return the result of the procedure, which may be the same as the passed one
3528 protected V postOperationResult(final V result, final long deadlineTs)
3529 throws IOException, TimeoutException {
3530 return result;
3534 * Called after the operation has terminated with a failure.
3535 * This allows extra steps to be performed after the procedure terminates,
3536 * and transformations to be applied to the result that will be returned by get().
3537 * The default implementation rethrows the exception.
3538 * @param exception the exception got from fetching the result
3539 * @param deadlineTs the timestamp after which this method should throw a TimeoutException
3540 * @return the result of the procedure, which may be the same as the passed one
3542 protected V postOperationFailure(final IOException exception, final long deadlineTs)
3543 throws IOException, TimeoutException {
3544 throw exception;
3547 protected interface WaitForStateCallable {
3548 boolean checkState(int tries) throws IOException;
3549 void throwInterruptedException() throws InterruptedIOException;
3550 void throwTimeoutException(long elapsed) throws TimeoutException;
3553 protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
3554 throws IOException, TimeoutException {
3555 int tries = 0;
3556 IOException serverEx = null;
3557 long startTime = EnvironmentEdgeManager.currentTime();
3558 while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3559 serverEx = null;
3560 try {
3561 if (callable.checkState(tries)) {
3562 return;
3564 } catch (IOException e) {
3565 serverEx = e;
3567 try {
3568 Thread.sleep(getAdmin().getPauseTime(tries++));
3569 } catch (InterruptedException e) {
3570 callable.throwInterruptedException();
3573 if (serverEx != null) {
3574 throw unwrapException(serverEx);
3575 } else {
3576 callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
3581 @InterfaceAudience.Private
3582 @InterfaceStability.Evolving
3583 protected abstract static class TableFuture<V> extends ProcedureFuture<V> {
3584 private final TableName tableName;
3586 public TableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
3587 super(admin, procId);
3588 this.tableName = tableName;
3591 @Override
3592 public String toString() {
3593 return getDescription();
3597 * @return the table name
3599 protected TableName getTableName() {
3600 return tableName;
3604 * @return the table descriptor
3606 protected TableDescriptor getTableDescriptor() throws IOException {
3607 return getAdmin().getDescriptor(getTableName());
3611 * @return the operation type like CREATE, DELETE, DISABLE etc.
3613 public abstract String getOperationType();
3616 * @return a description of the operation
3618 protected String getDescription() {
3619 return "Operation: " + getOperationType() + ", "
3620 + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
3624 protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
3625 @Override
3626 public void throwInterruptedException() throws InterruptedIOException {
3627 throw new InterruptedIOException("Interrupted while waiting for operation: "
3628 + getOperationType() + " on table: " + tableName.getNameWithNamespaceInclAsString());
3631 @Override
3632 public void throwTimeoutException(long elapsedTime) throws TimeoutException {
3633 throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
3634 tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
3638 @Override
3639 protected V postOperationResult(final V result, final long deadlineTs)
3640 throws IOException, TimeoutException {
3641 LOG.info(getDescription() + " completed");
3642 return super.postOperationResult(result, deadlineTs);
3645 @Override
3646 protected V postOperationFailure(final IOException exception, final long deadlineTs)
3647 throws IOException, TimeoutException {
3648 LOG.info(getDescription() + " failed with " + exception.getMessage());
3649 return super.postOperationFailure(exception, deadlineTs);
3652 protected void waitForTableEnabled(final long deadlineTs)
3653 throws IOException, TimeoutException {
3654 waitForState(deadlineTs, new TableWaitForStateCallable() {
3655 @Override
3656 public boolean checkState(int tries) throws IOException {
3657 try {
3658 if (getAdmin().isTableAvailable(tableName)) {
3659 return true;
3661 } catch (TableNotFoundException tnfe) {
3662 LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
3663 + " was not enabled, sleeping. tries=" + tries);
3665 return false;
3670 protected void waitForTableDisabled(final long deadlineTs)
3671 throws IOException, TimeoutException {
3672 waitForState(deadlineTs, new TableWaitForStateCallable() {
3673 @Override
3674 public boolean checkState(int tries) throws IOException {
3675 return getAdmin().isTableDisabled(tableName);
3680 protected void waitTableNotFound(final long deadlineTs)
3681 throws IOException, TimeoutException {
3682 waitForState(deadlineTs, new TableWaitForStateCallable() {
3683 @Override
3684 public boolean checkState(int tries) throws IOException {
3685 return !getAdmin().tableExists(tableName);
3690 protected void waitForSchemaUpdate(final long deadlineTs)
3691 throws IOException, TimeoutException {
3692 waitForState(deadlineTs, new TableWaitForStateCallable() {
3693 @Override
3694 public boolean checkState(int tries) throws IOException {
3695 return getAdmin().getAlterStatus(tableName).getFirst() == 0;
3700 protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
3701 throws IOException, TimeoutException {
3702 final TableDescriptor desc = getTableDescriptor();
3703 final AtomicInteger actualRegCount = new AtomicInteger(0);
3704 final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3705 @Override
3706 public boolean visit(Result rowResult) throws IOException {
3707 RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
3708 if (list == null) {
3709 LOG.warn("No serialized HRegionInfo in " + rowResult);
3710 return true;
3712 HRegionLocation l = list.getRegionLocation();
3713 if (l == null) {
3714 return true;
3716 if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
3717 return false;
3719 if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
3720 HRegionLocation[] locations = list.getRegionLocations();
3721 for (HRegionLocation location : locations) {
3722 if (location == null) continue;
3723 ServerName serverName = location.getServerName();
3724 // Make sure that regions are assigned to server
3725 if (serverName != null && serverName.getHostAndPort() != null) {
3726 actualRegCount.incrementAndGet();
3729 return true;
3733 int tries = 0;
3734 int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
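// N split keys produce N + 1 regions and each region has getRegionReplication() replicas, so for
// example 4 split keys with region replication 2 means 5 * 2 = 10 region replicas must be seen in
// meta before the table counts as fully online.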
3735 while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
3736 actualRegCount.set(0);
3737 MetaTableAccessor.scanMetaForTableRegions(getAdmin().getConnection(), visitor,
3738 desc.getTableName());
3739 if (actualRegCount.get() == numRegs) {
3740 // all the regions are online
3741 return;
3744 try {
3745 Thread.sleep(getAdmin().getPauseTime(tries++));
3746 } catch (InterruptedException e) {
3747 throw new InterruptedIOException("Interrupted when opening" + " regions; "
3748 + actualRegCount.get() + " of " + numRegs + " regions processed so far");
3751 throw new TimeoutException("Only " + actualRegCount.get() + " of " + numRegs
3752 + " regions are online; retries exhausted.");
3756 @InterfaceAudience.Private
3757 @InterfaceStability.Evolving
3758 protected abstract static class NamespaceFuture extends ProcedureFuture<Void> {
3759 private final String namespaceName;
3761 public NamespaceFuture(final HBaseAdmin admin, final String namespaceName, final Long procId) {
3762 super(admin, procId);
3763 this.namespaceName = namespaceName;
3767 * @return the namespace name
3769 protected String getNamespaceName() {
3770 return namespaceName;
3774 * @return the operation type like CREATE_NAMESPACE, DELETE_NAMESPACE, etc.
3776 public abstract String getOperationType();
3778 @Override
3779 public String toString() {
3780 return "Operation: " + getOperationType() + ", Namespace: " + getNamespaceName();
3784 @Override
3785 public List<SecurityCapability> getSecurityCapabilities() throws IOException {
3786 try {
3787 return executeCallable(new MasterCallable<List<SecurityCapability>>(getConnection(),
3788 getRpcControllerFactory()) {
3789 @Override
3790 protected List<SecurityCapability> rpcCall() throws Exception {
3791 SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build();
3792 return ProtobufUtil.toSecurityCapabilityList(
3793 master.getSecurityCapabilities(getRpcController(), req).getCapabilitiesList());
3796 } catch (IOException e) {
3797 if (e instanceof RemoteException) {
3798 e = ((RemoteException)e).unwrapRemoteException();
3800 throw e;
3804 @Override
3805 public boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException {
3806 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.SPLIT);
3809 @Override
3810 public boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException {
3811 return splitOrMergeSwitch(enabled, synchronous, MasterSwitchType.MERGE);
3814 private boolean splitOrMergeSwitch(boolean enabled, boolean synchronous,
3815 MasterSwitchType switchType) throws IOException {
3816 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3817 @Override
3818 protected Boolean rpcCall() throws Exception {
3819 MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(
3820 getRpcController(),
3821 RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType));
3822 return response.getPrevValueList().get(0);
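// The boolean returned above is the previous switch state, which makes it easy to restore the
// cluster setting afterwards. An illustrative bulk-operation pattern:
//
//   boolean oldSplit = admin.splitSwitch(false, true);
//   try {
//     // ... perform the bulk work ...
//   } finally {
//     admin.splitSwitch(oldSplit, true);
//   }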
3827 @Override
3828 public boolean isSplitEnabled() throws IOException {
3829 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3830 @Override
3831 protected Boolean rpcCall() throws Exception {
3832 return master.isSplitOrMergeEnabled(getRpcController(),
3833 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled();
3838 @Override
3839 public boolean isMergeEnabled() throws IOException {
3840 return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
3841 @Override
3842 protected Boolean rpcCall() throws Exception {
3843 return master.isSplitOrMergeEnabled(getRpcController(),
3844 RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled();
3849 private RpcControllerFactory getRpcControllerFactory() {
3850 return this.rpcControllerFactory;
3853 @Override
3854 public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
3855 throws IOException {
3856 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3857 @Override
3858 protected Void rpcCall() throws Exception {
3859 master.addReplicationPeer(getRpcController(),
3860 RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
3861 return null;
3866 @Override
3867 public void removeReplicationPeer(String peerId) throws IOException {
3868 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3869 @Override
3870 protected Void rpcCall() throws Exception {
3871 master.removeReplicationPeer(getRpcController(),
3872 RequestConverter.buildRemoveReplicationPeerRequest(peerId));
3873 return null;
3878 @Override
3879 public void enableReplicationPeer(final String peerId) throws IOException {
3880 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3881 @Override
3882 protected Void rpcCall() throws Exception {
3883 master.enableReplicationPeer(getRpcController(),
3884 RequestConverter.buildEnableReplicationPeerRequest(peerId));
3885 return null;
3890 @Override
3891 public void disableReplicationPeer(final String peerId) throws IOException {
3892 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3893 @Override
3894 protected Void rpcCall() throws Exception {
3895 master.disableReplicationPeer(getRpcController(),
3896 RequestConverter.buildDisableReplicationPeerRequest(peerId));
3897 return null;
3902 @Override
3903 public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
3904 return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
3905 getRpcControllerFactory()) {
3906 @Override
3907 protected ReplicationPeerConfig rpcCall() throws Exception {
3908 GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
3909 getRpcController(), RequestConverter.buildGetReplicationPeerConfigRequest(peerId));
3910 return ReplicationPeerConfigUtil.convert(response.getPeerConfig());
3915 @Override
3916 public void updateReplicationPeerConfig(final String peerId,
3917 final ReplicationPeerConfig peerConfig) throws IOException {
3918 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3919 @Override
3920 protected Void rpcCall() throws Exception {
3921 master.updateReplicationPeerConfig(getRpcController(),
3922 RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig));
3923 return null;
3928 @Override
3929 public void appendReplicationPeerTableCFs(String id,
3930 Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
3931 IOException {
3932 if (tableCfs == null) {
3933 throw new ReplicationException("tableCfs is null");
3935 ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
3936 ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
3937 updateReplicationPeerConfig(id, peerConfig);
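// For example (peer id, table and column family names are illustrative only), adding one column
// family of a table to an existing peer's replication config:
//
//   Map<TableName, List<String>> tableCfs = new HashMap<>();
//   tableCfs.put(TableName.valueOf("my_table"), Arrays.asList("cf1"));
//   admin.appendReplicationPeerTableCFs("1", tableCfs);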
3940 @Override
3941 public void removeReplicationPeerTableCFs(String id,
3942 Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
3943 IOException {
3944 if (tableCfs == null) {
3945 throw new ReplicationException("tableCfs is null");
3947 ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
3948 ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
3949 updateReplicationPeerConfig(id, peerConfig);
3952 @Override
3953 public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
3954 return listReplicationPeers((Pattern)null);
3957 @Override
3958 public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
3959 throws IOException {
3960 return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(),
3961 getRpcControllerFactory()) {
3962 @Override
3963 protected List<ReplicationPeerDescription> rpcCall() throws Exception {
3964 List<ReplicationProtos.ReplicationPeerDescription> peersList = master.listReplicationPeers(
3965 getRpcController(), RequestConverter.buildListReplicationPeersRequest(pattern))
3966 .getPeerDescList();
3967 List<ReplicationPeerDescription> result = new ArrayList<>(peersList.size());
3968 for (ReplicationProtos.ReplicationPeerDescription peer : peersList) {
3969 result.add(ReplicationPeerConfigUtil.toReplicationPeerDescription(peer));
3971 return result;
3976 @Override
3977 public void decommissionRegionServers(List<ServerName> servers, boolean offload)
3978 throws IOException {
3979 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
3980 @Override
3981 public Void rpcCall() throws ServiceException {
3982 master.decommissionRegionServers(getRpcController(),
3983 RequestConverter.buildDecommissionRegionServersRequest(servers, offload));
3984 return null;
3989 @Override
3990 public List<ServerName> listDecommissionedRegionServers() throws IOException {
3991 return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
3992 getRpcControllerFactory()) {
3993 @Override
3994 public List<ServerName> rpcCall() throws ServiceException {
3995 ListDecommissionedRegionServersRequest req =
3996 ListDecommissionedRegionServersRequest.newBuilder().build();
3997 List<ServerName> servers = new ArrayList<>();
3998 for (HBaseProtos.ServerName server : master
3999 .listDecommissionedRegionServers(getRpcController(), req).getServerNameList()) {
4000 servers.add(ProtobufUtil.toServerName(server));
4002 return servers;
4007 @Override
4008 public void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
4009 throws IOException {
4010 executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
4011 @Override
4012 public Void rpcCall() throws ServiceException {
4013 master.recommissionRegionServer(getRpcController(),
4014 RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames));
4015 return null;
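// The usual decommission workflow is: decommissionRegionServers(servers, true) to drain the
// servers, listDecommissionedRegionServers() to verify, and recommissionRegionServer(...) to bring
// a server back, optionally naming the encoded regions to move back onto it. An illustrative
// sequence (serverName obtained elsewhere):
//
//   admin.decommissionRegionServers(Arrays.asList(serverName), true);
//   LOG.info("Decommissioned: " + admin.listDecommissionedRegionServers());
//   admin.recommissionRegionServer(serverName, new ArrayList<>());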
4020 @Override
4021 public List<TableCFs> listReplicatedTableCFs() throws IOException {
4022 List<TableCFs> replicatedTableCFs = new ArrayList<>();
4023 List<TableDescriptor> tables = listTableDescriptors();
4024 tables.forEach(table -> {
4025 Map<String, Integer> cfs = new HashMap<>();
4026 Stream.of(table.getColumnFamilies())
4027 .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
4028 .forEach(column -> {
4029 cfs.put(column.getNameAsString(), column.getScope());
4031 if (!cfs.isEmpty()) {
4032 replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs));
4035 return replicatedTableCFs;
4038 @Override
4039 public void enableTableReplication(final TableName tableName) throws IOException {
4040 if (tableName == null) {
4041 throw new IllegalArgumentException("Table name cannot be null");
4043 if (!tableExists(tableName)) {
4044 throw new TableNotFoundException("Table '" + tableName.getNameAsString()
4045 + "' does not exists.");
4047 byte[][] splits = getTableSplits(tableName);
4048 checkAndSyncTableDescToPeers(tableName, splits);
4049 setTableRep(tableName, true);
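// Enabling table replication therefore requires at least one replication peer to exist, creates or
// validates the table on every peer that replicates it, and ensures every column family carries
// REPLICATION_SCOPE_GLOBAL (the table is only modified if the scopes do not already match). An
// illustrative call sequence, with the peer config assumed to be built elsewhere:
//
//   admin.addReplicationPeer("1", peerConfig, true);
//   admin.enableTableReplication(TableName.valueOf("my_table"));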
4052 @Override
4053 public void disableTableReplication(final TableName tableName) throws IOException {
4054 if (tableName == null) {
4055 throw new IllegalArgumentException("Table name is null");
4057 if (!tableExists(tableName)) {
4058 throw new TableNotFoundException("Table '" + tableName.getNameAsString()
4059 + "' does not exists.");
4061 setTableRep(tableName, false);
4065 * Connect to peer and check the table descriptor on peer:
4066 * <ol>
4067 * <li>Create the same table on the peer if it does not exist.</li>
4068 * <li>Throw an exception if the table already has replication enabled on any of the column
4069 * families.</li>
4070 * <li>Throw an exception if the table exists on the peer cluster but the descriptors are not the same.</li>
4071 * </ol>
4072 * @param tableName name of the table to sync to the peer
4073 * @param splits table split keys
4074 * @throws IOException if a remote or network exception occurs
4076 private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits)
4077 throws IOException {
4078 List<ReplicationPeerDescription> peers = listReplicationPeers();
4079 if (peers == null || peers.size() <= 0) {
4080 throw new IllegalArgumentException("Found no peer cluster for replication.");
4083 for (ReplicationPeerDescription peerDesc : peers) {
4084 if (peerDesc.getPeerConfig().needToReplicate(tableName)) {
4085 Configuration peerConf =
4086 ReplicationPeerConfigUtil.getPeerClusterConfiguration(this.conf, peerDesc);
4087 try (Connection conn = ConnectionFactory.createConnection(peerConf);
4088 Admin repHBaseAdmin = conn.getAdmin()) {
4089 TableDescriptor tableDesc = getDescriptor(tableName);
4090 TableDescriptor peerTableDesc = null;
4091 if (!repHBaseAdmin.tableExists(tableName)) {
4092 repHBaseAdmin.createTable(tableDesc, splits);
4093 } else {
4094 peerTableDesc = repHBaseAdmin.getDescriptor(tableName);
4095 if (peerTableDesc == null) {
4096 throw new IllegalArgumentException("Failed to get table descriptor for table "
4097 + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId());
4099 if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc,
4100 tableDesc) != 0) {
4101 throw new IllegalArgumentException("Table " + tableName.getNameAsString()
4102 + " exists in peer cluster " + peerDesc.getPeerId()
4103 + ", but the table descriptors are not same when compared with source cluster."
4104 + " Thus can not enable the table's replication switch.");
4113 * Set the table's replication switch if it is not already in the requested state.
4114 * @param tableName name of the table
4115 * @param enableRep whether replication for the table should be enabled or disabled
4116 * @throws IOException if a remote or network exception occurs
4118 private void setTableRep(final TableName tableName, boolean enableRep) throws IOException {
4119 TableDescriptor tableDesc = getDescriptor(tableName);
4120 if (!tableDesc.matchReplicationScope(enableRep)) {
4121 int scope =
4122 enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL;
4123 modifyTable(TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build());
4127 @Override
4128 public void clearCompactionQueues(final ServerName sn, final Set<String> queues)
4129 throws IOException, InterruptedException {
4130 if (queues == null || queues.size() == 0) {
4131 throw new IllegalArgumentException("queues cannot be null or empty");
4133 final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
4134 Callable<Void> callable = new Callable<Void>() {
4135 @Override
4136 public Void call() throws Exception {
4137 // TODO: There is no timeout on this controller. Set one!
4138 HBaseRpcController controller = rpcControllerFactory.newController();
4139 ClearCompactionQueuesRequest request =
4140 RequestConverter.buildClearCompactionQueuesRequest(queues);
4141 admin.clearCompactionQueues(controller, request);
4142 return null;
4145 ProtobufUtil.call(callable);
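// Illustrative usage; the queue names accepted by the regionserver ("long" and "short" are the
// usual ones) should be treated as an assumption here:
//
//   admin.clearCompactionQueues(serverName, new HashSet<>(Arrays.asList("long", "short")));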
4148 @Override
4149 public List<ServerName> clearDeadServers(final List<ServerName> servers) throws IOException {
4150 if (servers == null || servers.size() == 0) {
4151 throw new IllegalArgumentException("servers cannot be null or empty");
4153 return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
4154 getRpcControllerFactory()) {
4155 @Override
4156 protected List<ServerName> rpcCall() throws Exception {
4157 ClearDeadServersRequest req = RequestConverter.buildClearDeadServersRequest(servers);
4158 return ProtobufUtil.toServerNameList(
4159 master.clearDeadServers(getRpcController(), req).getServerNameList());