/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import static org.apache.hadoop.hbase.util.FutureUtils.get;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.replication.SyncReplicationState;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;

/**
 * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and
 * call {@link #close()} when done.
 * <p>
 * Admin can be used to create, drop, list, enable, disable, and otherwise modify tables, as well
 * as perform other administrative operations.
 * @see ConnectionFactory
 */
@InterfaceAudience.Public
public interface Admin extends Abortable, Closeable {

  /**
   * Return the operation timeout for an RPC call.
   * @see #getSyncWaitTimeout()
   */
  int getOperationTimeout();

  /**
   * Return the blocking wait time for an asynchronous operation. Can be configured by
   * {@code hbase.client.sync.wait.timeout.msec}.
   * <p>
   * For several operations, such as createTable, deleteTable, etc., the rpc call will finish
   * right after we schedule a procedure at master side, so the timeout will not be controlled by
   * the above {@link #getOperationTimeout()}. Instead, the timeout value here tells you how long
   * we will wait until the procedure at master side is finished.
   * <p>
   * In general, you can consider that the implementation of the XXXX method is just
   * XXXXAsync().get(getSyncWaitTimeout(), TimeUnit.MILLISECONDS).
   * @see #getOperationTimeout()
   */
  int getSyncWaitTimeout();
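
  // Illustrative sketch (not part of the interface): the relationship between a synchronous call
  // and its async counterpart described above. Assumes an existing Connection "conn" and a
  // TableDescriptor "desc"; both names are placeholders, not required by this API.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     // Blocks until the master-side procedure finishes or getSyncWaitTimeout() elapses.
  //     admin.createTable(desc);
  //     // Roughly equivalent to:
  //     admin.createTableAsync(desc).get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  //   }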

  void abort(String why, Throwable e);

  /**
   * @return Connection used by this object.
   */
  Connection getConnection();

  /**
   * Check if a table exists.
   * @param tableName Table to check.
   * @return <code>true</code> if table exists already.
   * @throws IOException if a remote or network exception occurs
   */
  boolean tableExists(TableName tableName) throws IOException;

  /**
   * List all the userspace tables.
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors() throws IOException;

  /**
   * List all the tables, optionally including system tables.
   * @param includeSysTables <code>false</code> to match only against userspace tables
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors(boolean includeSysTables) throws IOException;

  /**
   * List all the userspace tables that match the given pattern.
   * @param pattern The compiled regular expression to match against
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTableDescriptors()
   */
  default List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
    return listTableDescriptors(pattern, false);
  }
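
  // Illustrative sketch (not part of the interface): listing table descriptors whose names match
  // a regular expression. Assumes an existing Admin instance "admin"; the pattern is arbitrary.
  //
  //   List<TableDescriptor> matching = admin.listTableDescriptors(Pattern.compile("test_.*"));
  //   for (TableDescriptor td : matching) {
  //     System.out.println(td.getTableName());
  //   }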

  /**
   * List all the tables matching the given pattern.
   * @param pattern The compiled regular expression to match against
   * @param includeSysTables <code>false</code> to match only against userspace tables
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTableDescriptors()
   */
  List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
    throws IOException;

  /**
   * List all of the names of userspace tables.
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames() throws IOException;

  /**
   * List all of the names of userspace tables matching the given pattern.
   * @param pattern The regular expression to match against
   * @return array of table names
   * @throws IOException if a remote or network exception occurs
   */
  default TableName[] listTableNames(Pattern pattern) throws IOException {
    return listTableNames(pattern, false);
  }

  /**
   * List all of the names of tables matching the given pattern.
   * @param pattern The regular expression to match against
   * @param includeSysTables <code>false</code> to match only against userspace tables
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException;

  /**
   * Get a table descriptor.
   * @param tableName as a {@link TableName}
   * @return the tableDescriptor
   * @throws org.apache.hadoop.hbase.TableNotFoundException if the table does not exist
   * @throws IOException if a remote or network exception occurs
   */
  TableDescriptor getDescriptor(TableName tableName)
    throws TableNotFoundException, IOException;

  /**
   * Creates a new table. Synchronous operation.
   * @param desc table descriptor for table
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *           threads, the table may have been created between test-for-existence and
   *           attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   */
  default void createTable(TableDescriptor desc) throws IOException {
    get(createTableAsync(desc), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Creates a new table with the specified number of regions. The start key specified will
   * become the end key of the first region of the table, and the end key specified will become
   * the start key of the last region of the table (the first region has a null start key and the
   * last region has a null end key). BigInteger math will be used to divide the key range
   * specified into enough segments to make the required number of total regions. Synchronous
   * operation.
   * @param desc table descriptor for table
   * @param startKey beginning of key range
   * @param endKey end of key range
   * @param numRegions the total number of regions to create
   * @throws IOException if a remote or network exception occurs
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *           threads, the table may have been created between test-for-existence and
   *           attempt-at-creation).
   */
  void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
    throws IOException;

  /**
   * Creates a new table with an initial set of empty regions defined by the specified split
   * keys. The total number of regions created will be the number of split keys plus one.
   * Synchronous operation. Note: avoid passing empty split keys.
   * @param desc table descriptor for table
   * @param splitKeys array of split keys for the initial regions of the table
   * @throws IllegalArgumentException if the table name is reserved, if the split keys are
   *           repeated, or if a split key is an empty byte array.
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *           threads, the table may have been created between test-for-existence and
   *           attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   */
  default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException {
    get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Creates a new table but does not block and wait for it to come online. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * <p>
   * Throws IllegalArgumentException if the table name is bad, if the split keys are repeated, or
   * if a split key is an empty byte array.
   * @param desc table descriptor for table
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> createTableAsync(TableDescriptor desc) throws IOException;

  /**
   * Creates a new table but does not block and wait for it to come online. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * <p>
   * Throws IllegalArgumentException if the table name is bad, if the split keys are repeated, or
   * if a split key is an empty byte array.
   * @param desc table descriptor for table
   * @param splitKeys keys to check if the table has been created with all split keys
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async creation. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> createTableAsync(TableDescriptor desc, byte[][] splitKeys) throws IOException;
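
  // Illustrative sketch (not part of the interface): creating a pre-split table asynchronously
  // and waiting for the master-side procedure to finish. Assumes an Admin "admin"; the table
  // name, column family, and split points are placeholders.
  //
  //   TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
  //   byte[][] splits = { Bytes.toBytes("m"), Bytes.toBytes("t") };
  //   Future<Void> f = admin.createTableAsync(desc, splits);
  //   f.get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS); // throws on failure or timeout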

  /**
   * Deletes a table. Synchronous operation.
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   */
  default void deleteTable(TableName tableName) throws IOException {
    get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Deletes the table but does not block and wait for it to be completely removed. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> deleteTableAsync(TableName tableName) throws IOException;

  /**
   * Truncate a table. Synchronous operation.
   * @param tableName name of table to truncate
   * @param preserveSplits <code>true</code> if the splits should be preserved
   * @throws IOException if a remote or network exception occurs
   */
  default void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
    get(truncateTableAsync(tableName, preserveSplits), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Truncate the table but does not block and wait for it to be completely truncated. You can
   * use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table to truncate
   * @param preserveSplits <code>true</code> if the splits should be preserved
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits)
    throws IOException;

  /**
   * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
   * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be
   * in disabled state for it to be enabled.
   * @param tableName name of the table
   * @throws IOException There could be a couple types of IOException: TableNotFoundException
   *           means the table doesn't exist; TableNotDisabledException means the table isn't in
   *           disabled state.
   * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
   * @see #disableTable(org.apache.hadoop.hbase.TableName)
   * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
   */
  default void enableTable(TableName tableName) throws IOException {
    get(enableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Enable the table but does not block and wait for it to be completely enabled. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table to enable
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> enableTableAsync(TableName tableName) throws IOException;

  /**
   * Disable the table but does not block and wait for it to be completely disabled. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table to disable
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> disableTableAsync(TableName tableName) throws IOException;

  /**
   * Disable table and wait on completion. May timeout eventually. Use
   * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
   * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
   * enabled state for it to be disabled.
   * @param tableName name of the table
   * @throws IOException There could be a couple types of IOException: TableNotFoundException
   *           means the table doesn't exist; TableNotEnabledException means the table isn't in
   *           enabled state.
   */
  default void disableTable(TableName tableName) throws IOException {
    get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }
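
  // Illustrative sketch (not part of the interface): disabling and re-enabling a table without
  // blocking the calling thread until each step is needed. Assumes an Admin "admin" and a
  // TableName "tn"; both names are placeholders.
  //
  //   Future<Void> disabling = admin.disableTableAsync(tn);
  //   disabling.get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  //   // ... perform schema changes while the table is disabled ...
  //   admin.enableTableAsync(tn).get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);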

  /**
   * @param tableName name of table to check
   * @return <code>true</code> if table is on-line
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableEnabled(TableName tableName) throws IOException;

  /**
   * @param tableName name of table to check
   * @return <code>true</code> if table is off-line
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableDisabled(TableName tableName) throws IOException;

  /**
   * @param tableName name of table to check
   * @return <code>true</code> if all regions of the table are available
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableAvailable(TableName tableName) throws IOException;

  /**
   * Add a column family to an existing table. Synchronous operation. Use
   * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns
   * a {@link Future} from which you can learn whether success or failure.
   * @param tableName name of the table to add column family to
   * @param columnFamily column family descriptor of column family to be added
   * @throws IOException if a remote or network exception occurs
   */
  default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
    throws IOException {
    get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Add a column family to an existing table. Asynchronous operation. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of the table to add column family to
   * @param columnFamily column family descriptor of column family to be added
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async add column family. You can use Future.get(long, TimeUnit) to
   *         wait on the operation to complete.
   */
  Future<Void> addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
    throws IOException;
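
  // Illustrative sketch (not part of the interface): adding a column family to an existing
  // table. Assumes an Admin "admin" and a TableName "tn"; the family name "new_cf" is a
  // placeholder.
  //
  //   ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("new_cf");
  //   admin.addColumnFamily(tn, cf); // blocks until the master-side procedure completes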

  /**
   * Delete a column family from a table. Synchronous operation. Use
   * {@link #deleteColumnFamilyAsync(TableName, byte[])} instead because it returns a
   * {@link Future} from which you can learn whether success or failure.
   * @param tableName name of table
   * @param columnFamily name of column family to be deleted
   * @throws IOException if a remote or network exception occurs
   */
  default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws IOException {
    get(deleteColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Delete a column family from a table. Asynchronous operation. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table
   * @param columnFamily name of column family to be deleted
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async delete column family. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily)
    throws IOException;

  /**
   * Modify an existing column family on a table. Synchronous operation. Use
   * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it
   * returns a {@link Future} from which you can learn whether success or failure.
   * @param tableName name of table
   * @param columnFamily new column family descriptor to use
   * @throws IOException if a remote or network exception occurs
   */
  default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
    throws IOException {
    get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Modify an existing column family on a table. Asynchronous operation. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param tableName name of table
   * @param columnFamily new column family descriptor to use
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async modify column family. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
    throws IOException;

  /**
   * Get all the online regions on a region server.
   * @return List of {@link RegionInfo}
   * @throws IOException if a remote or network exception occurs
   */
  List<RegionInfo> getRegions(ServerName serverName) throws IOException;

  /**
   * Flush a table. Synchronous operation.
   * @param tableName table to flush
   * @throws IOException if a remote or network exception occurs
   */
  void flush(TableName tableName) throws IOException;

  /**
   * Flush the specified column family stores on all regions of the passed table. This runs as a
   * synchronous operation.
   * @param tableName table to flush
   * @param columnFamily column family within a table
   * @throws IOException if a remote or network exception occurs
   */
  void flush(TableName tableName, byte[] columnFamily) throws IOException;

  /**
   * Flush an individual region. Synchronous operation.
   * @param regionName region to flush
   * @throws IOException if a remote or network exception occurs
   */
  void flushRegion(byte[] regionName) throws IOException;

  /**
   * Flush a column family within a region. Synchronous operation.
   * @param regionName region to flush
   * @param columnFamily column family within a region
   * @throws IOException if a remote or network exception occurs
   */
  void flushRegion(byte[] regionName, byte[] columnFamily) throws IOException;

  /**
   * Flush all regions on the region server. Synchronous operation.
   * @param serverName the region server name to flush
   * @throws IOException if a remote or network exception occurs
   */
  void flushRegionServer(ServerName serverName) throws IOException;

  /**
   * Compact a table. Asynchronous operation in that this method requests that a Compaction run
   * and then it returns. It does not wait on the completion of Compaction (it can take a while).
   * @param tableName table to compact
   * @throws IOException if a remote or network exception occurs
   */
  void compact(TableName tableName) throws IOException;

  /**
   * Compact an individual region. Asynchronous operation in that this method requests that a
   * Compaction run and then it returns. It does not wait on the completion of Compaction (it can
   * take a while).
   * @param regionName region to compact
   * @throws IOException if a remote or network exception occurs
   */
  void compactRegion(byte[] regionName) throws IOException;

  /**
   * Compact a column family within a table. Asynchronous operation in that this method requests
   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
   * (it can take a while).
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @throws IOException if a remote or network exception occurs
   */
  void compact(TableName tableName, byte[] columnFamily) throws IOException;

  /**
   * Compact a column family within a region. Asynchronous operation in that this method requests
   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
   * (it can take a while).
   * @param regionName region to compact
   * @param columnFamily column family within a region
   * @throws IOException if a remote or network exception occurs
   */
  void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException;

  /**
   * Compact a table. Asynchronous operation in that this method requests that a Compaction run
   * and then it returns. It does not wait on the completion of Compaction (it can take a while).
   * @param tableName table to compact
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException
   */
  void compact(TableName tableName, CompactType compactType)
    throws IOException, InterruptedException;

  /**
   * Compact a column family within a table. Asynchronous operation in that this method requests
   * that a Compaction run and then it returns. It does not wait on the completion of Compaction
   * (it can take a while).
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if not a mob column family or if a remote or network exception occurs
   * @throws InterruptedException
   */
  void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
    throws IOException, InterruptedException;

  /**
   * Major compact a table. Asynchronous operation in that this method requests that a Compaction
   * run and then it returns. It does not wait on the completion of Compaction (it can take a
   * while).
   * @param tableName table to major compact
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompact(TableName tableName) throws IOException;

  /**
   * Major compact an individual region. Asynchronous operation in that this method requests that
   * a Compaction run and then it returns. It does not wait on the completion of Compaction (it
   * can take a while).
   * @param regionName region to major compact
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompactRegion(byte[] regionName) throws IOException;

  /**
   * Major compact a column family within a table. Asynchronous operation in that this method
   * requests that a Compaction run and then it returns. It does not wait on the completion of
   * Compaction (it can take a while).
   * @param tableName table to major compact
   * @param columnFamily column family within a table
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompact(TableName tableName, byte[] columnFamily) throws IOException;

  /**
   * Major compact a column family within a region. Asynchronous operation in that this method
   * requests that a Compaction run and then it returns. It does not wait on the completion of
   * Compaction (it can take a while).
   * @param regionName region to major compact
   * @param columnFamily column family within a region
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException;

  /**
   * Major compact a table. Asynchronous operation in that this method requests that a Compaction
   * run and then it returns. It does not wait on the completion of Compaction (it can take a
   * while).
   * @param tableName table to compact
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException
   */
  void majorCompact(TableName tableName, CompactType compactType)
    throws IOException, InterruptedException;

  /**
   * Major compact a column family within a table. Asynchronous operation in that this method
   * requests that a Compaction run and then it returns. It does not wait on the completion of
   * Compaction (it can take a while).
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if not a mob column family or if a remote or network exception occurs
   * @throws InterruptedException
   */
  void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
    throws IOException, InterruptedException;

  /**
   * Turn the compaction on or off. Disabling compactions will also interrupt any currently
   * ongoing compactions. This state is ephemeral. The setting will be lost on restart.
   * Compaction can also be enabled/disabled by modifying the configuration
   * {@code hbase.regionserver.compaction.enabled}.
   * @param switchState Set to <code>true</code> to enable, <code>false</code> to disable.
   * @param serverNamesList list of region servers.
   * @return Previous compaction states for region servers
   * @throws IOException if a remote or network exception occurs
   */
  Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> serverNamesList)
    throws IOException;
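
  // Illustrative sketch (not part of the interface): temporarily disabling compactions and
  // inspecting the previous state that is returned. Assumes an Admin "admin"; it also assumes
  // an empty server list targets all region servers, which should be verified against the
  // implementation in use.
  //
  //   Map<ServerName, Boolean> previous =
  //     admin.compactionSwitch(false, java.util.Collections.emptyList());
  //   previous.forEach((server, wasEnabled) ->
  //     System.out.println(server + " compaction was enabled: " + wasEnabled));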

  /**
   * Compact all regions on the region server. Asynchronous operation in that this method
   * requests that a Compaction run and then it returns. It does not wait on the completion of
   * Compaction (it can take a while).
   * @param serverName the region server name
   * @throws IOException if a remote or network exception occurs
   */
  void compactRegionServer(ServerName serverName) throws IOException;

  /**
   * Major compact all regions on the region server. Asynchronous operation in that this method
   * requests that a Compaction run and then it returns. It does not wait on the completion of
   * Compaction (it can take a while).
   * @param serverName the region server name
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompactRegionServer(ServerName serverName) throws IOException;

  /**
   * Move the region <code>encodedRegionName</code> to a random server.
   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region
   *          name suffix: e.g. if regionname is
   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
   */
  void move(byte[] encodedRegionName) throws IOException;

  /**
   * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region
   *          name suffix: e.g. if regionname is
   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @param destServerName The servername of the destination regionserver. If passed the empty
   *          byte array we'll assign to a random server. A server name is made of host, port and
   *          startcode. Here is an example: <code> host187.example.com,60020,1289493121758</code>
   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)}
   *             instead. And if you want to move the region to a random server, please use
   *             {@link #move(byte[])}.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-22108">HBASE-22108</a>
   */
  @Deprecated
  default void move(byte[] encodedRegionName, byte[] destServerName) throws IOException {
    if (destServerName == null || destServerName.length == 0) {
      move(encodedRegionName);
    } else {
      move(encodedRegionName, ServerName.valueOf(Bytes.toString(destServerName)));
    }
  }

  /**
   * Move the region <code>encodedRegionName</code> to <code>destServerName</code>.
   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region
   *          name suffix: e.g. if regionname is
   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @param destServerName The servername of the destination regionserver. A server name is made
   *          of host, port and startcode. Here is an example:
   *          <code> host187.example.com,60020,1289493121758</code>
   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
   */
  void move(byte[] encodedRegionName, ServerName destServerName) throws IOException;
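
  // Illustrative sketch (not part of the interface): moving a region to a specific region
  // server. Assumes an Admin "admin", a RegionInfo "region" for the region to move, and a
  // destination host/port/startcode; all values are placeholders.
  //
  //   ServerName dest = ServerName.valueOf("host187.example.com", 60020, 1289493121758L);
  //   admin.move(region.getEncodedNameAsBytes(), dest);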

  /**
   * @param regionName Region name to assign.
   * @throws IOException if a remote or network exception occurs
   */
  void assign(byte[] regionName) throws IOException;

  /**
   * @param regionName Region name to unassign.
   * @throws IOException if a remote or network exception occurs
   */
  void unassign(byte[] regionName) throws IOException;

  /**
   * Unassign a region from current hosting regionserver. Region will then be assigned to a
   * regionserver chosen at random. Region could be reassigned back to the same server. Use
   * {@link #move(byte[], ServerName)} if you want to control the region movement.
   * @param regionName Region to unassign. Will clear any existing RegionPlan if one found.
   * @param force If <code>true</code>, force unassign (Will remove region from
   *          regions-in-transition too if present. If results in double assignment use hbck -fix
   *          to resolve. To be used by experts).
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-24875">HBASE-24875</a>
   */
  @Deprecated
  default void unassign(byte[] regionName, boolean force) throws IOException {
    unassign(regionName);
  }

  /**
   * Offline specified region from master's in-memory state. It will not attempt to reassign the
   * region as in unassign. This API can be used when a region not served by any region server
   * and still online as per Master's in memory state. If this API is incorrectly used on active
   * region then master will lose track of that region. This is a special method that should be
   * used by experts or hbck.
   * @param regionName Region to offline.
   * @throws IOException if a remote or network exception occurs
   */
  void offline(byte[] regionName) throws IOException;

  /**
   * Turn the load balancer on or off.
   * @param onOrOff Set to <code>true</code> to enable, <code>false</code> to disable.
   * @param synchronous If <code>true</code>, it waits until current balance() call, if
   *          outstanding, to return.
   * @return Previous balancer value
   * @throws IOException if a remote or network exception occurs
   */
  boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException;

  /**
   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do
   * the reassignments. Can NOT run for various reasons. Check logs.
   * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  default boolean balance() throws IOException {
    return balance(BalanceRequest.defaultInstance()).isBalancerRan();
  }

  /**
   * Invoke the balancer with the given balance request. The BalanceRequest defines how the
   * balancer will run. See {@link BalanceRequest} for more details.
   * @param request defines how the balancer should run
   * @return {@link BalanceResponse} with details about the results of the invocation.
   * @throws IOException if a remote or network exception occurs
   */
  BalanceResponse balance(BalanceRequest request) throws IOException;
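
  // Illustrative sketch (not part of the interface): running the balancer even when regions are
  // in transition, then checking whether it actually ran. Assumes an Admin "admin".
  //
  //   BalanceRequest req = BalanceRequest.newBuilder()
  //     .setIgnoreRegionsInTransition(true)
  //     .build();
  //   BalanceResponse resp = admin.balance(req);
  //   if (!resp.isBalancerRan()) {
  //     System.out.println("Balancer did not run; check the master logs for the reason.");
  //   }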

  /**
   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do
   * the reassignments. If there is region in transition, force parameter of true would still run
   * balancer. Can *not* run for other reasons. Check logs.
   * @param force whether we should force balance even if there is region in transition
   * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)}
   *             instead.
   */
  @Deprecated
  default boolean balance(boolean force) throws IOException {
    return balance(BalanceRequest.newBuilder()
      .setIgnoreRegionsInTransition(force)
      .build()).isBalancerRan();
  }

  /**
   * Query the current state of the balancer.
   * @return <code>true</code> if the balancer is enabled, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isBalancerEnabled() throws IOException;

  /**
   * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling
   * this API will drop all the cached blocks specific to a table from BlockCache. This can
   * significantly impact the query performance as the subsequent queries will have to retrieve
   * the blocks from underlying filesystem.
   * @param tableName table to clear block cache
   * @return CacheEvictionStats related to the eviction
   * @throws IOException if a remote or network exception occurs
   */
  CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException;

  /**
   * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a
   * non-blocking invocation to region normalizer. If return value is true, it means the request
   * was submitted successfully. We need to check logs for the details of which regions were
   * split/merged.
   * @return {@code true} if region normalizer ran, {@code false} otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  default boolean normalize() throws IOException {
    return normalize(new NormalizeTableFilterParams.Builder().build());
  }

  /**
   * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a
   * non-blocking invocation to region normalizer. If return value is true, it means the request
   * was submitted successfully. We need to check logs for the details of which regions were
   * split/merged.
   * @param ntfp limit to tables matching the specified filter.
   * @return {@code true} if region normalizer ran, {@code false} otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean normalize(NormalizeTableFilterParams ntfp) throws IOException;

  /**
   * Query the current state of the region normalizer.
   * @return <code>true</code> if region normalizer is enabled, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isNormalizerEnabled() throws IOException;

  /**
   * Turn region normalizer on or off.
   * @return Previous normalizer value
   * @throws IOException if a remote or network exception occurs
   */
  boolean normalizerSwitch(boolean on) throws IOException;

  /**
   * Enable/Disable the catalog janitor.
   * @param onOrOff if <code>true</code> enables the catalog janitor
   * @return the previous state
   * @throws IOException if a remote or network exception occurs
   */
  boolean catalogJanitorSwitch(boolean onOrOff) throws IOException;

  /**
   * Ask for a scan of the catalog table.
   * @return the number of entries cleaned. Returns -1 if previous run is in progress.
   * @throws IOException if a remote or network exception occurs
   */
  int runCatalogJanitor() throws IOException;

  /**
   * Query on the catalog janitor state (Enabled/Disabled?).
   * @throws IOException if a remote or network exception occurs
   */
  boolean isCatalogJanitorEnabled() throws IOException;

  /**
   * Enable/Disable the cleaner chore.
   * @param onOrOff if <code>true</code> enables the cleaner chore
   * @return the previous state
   * @throws IOException if a remote or network exception occurs
   */
  boolean cleanerChoreSwitch(boolean onOrOff) throws IOException;

  /**
   * Ask for cleaner chore to run.
   * @return <code>true</code> if cleaner chore ran, <code>false</code> otherwise
   * @throws IOException if a remote or network exception occurs
   */
  boolean runCleanerChore() throws IOException;

  /**
   * Query on the cleaner chore state (Enabled/Disabled?).
   * @throws IOException if a remote or network exception occurs
   */
  boolean isCleanerChoreEnabled() throws IOException;

  /**
   * Merge two regions. Asynchronous operation.
   * @param nameOfRegionA encoded or full name of region a
   * @param nameOfRegionB encoded or full name of region b
   * @param forcible <code>true</code> if do a compulsory merge, otherwise we will only merge two
   *          adjacent regions
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.3.0 and will be removed in 4.0.0. Multi-region merge feature is now
   *             supported. Use {@link #mergeRegionsAsync(byte[][], boolean)} instead.
   */
  @Deprecated
  default Future<Void> mergeRegionsAsync(byte[] nameOfRegionA, byte[] nameOfRegionB,
    boolean forcible) throws IOException {
    byte[][] nameofRegionsToMerge = new byte[2][];
    nameofRegionsToMerge[0] = nameOfRegionA;
    nameofRegionsToMerge[1] = nameOfRegionB;
    return mergeRegionsAsync(nameofRegionsToMerge, forcible);
  }

  /**
   * Merge multiple regions (>=2). Asynchronous operation.
   * @param nameofRegionsToMerge encoded or full name of daughter regions
   * @param forcible <code>true</code> if do a compulsory merge, otherwise we will only merge
   *          adjacent regions
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible)
    throws IOException;
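
  // Illustrative sketch (not part of the interface): merging two adjacent regions of a table.
  // Assumes an Admin "admin" and a TableName "tn"; it also assumes the first two regions
  // returned by getRegions(tn) are adjacent, which a real caller should verify.
  //
  //   List<RegionInfo> regions = admin.getRegions(tn);
  //   byte[][] toMerge = {
  //     regions.get(0).getEncodedNameAsBytes(),
  //     regions.get(1).getEncodedNameAsBytes()
  //   };
  //   admin.mergeRegionsAsync(toMerge, false)
  //     .get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);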

  /**
   * Split a table. The method will execute split action for each region in table.
   * @param tableName table to split
   * @throws IOException if a remote or network exception occurs
   */
  void split(TableName tableName) throws IOException;

  /**
   * Split a table.
   * @param tableName table to split
   * @param splitPoint the explicit position to split on
   * @throws IOException if a remote or network exception occurs
   */
  void split(TableName tableName, byte[] splitPoint) throws IOException;
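
  // Illustrative sketch (not part of the interface): requesting a split of a table at an
  // explicit row key. Assumes an Admin "admin" and a TableName "tn"; the split point is a
  // placeholder.
  //
  //   admin.split(tn, Bytes.toBytes("row-5000"));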

  /**
   * Split an individual region. Asynchronous operation.
   * @param regionName region to split
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> splitRegionAsync(byte[] regionName) throws IOException;

  /**
   * Split an individual region. Asynchronous operation.
   * @param regionName region to split
   * @param splitPoint the explicit position to split on
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException;

  /**
   * Modify an existing table, more IRB friendly version.
   * @param td modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  default void modifyTable(TableDescriptor td) throws IOException {
    get(modifyTableAsync(td), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This
   * means that it may be a while before your schema change is updated across all of the table.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   * @param td description of the table
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete
   */
  Future<Void> modifyTableAsync(TableDescriptor td) throws IOException;

  /**
   * Shuts down the HBase cluster.
   * <p>
   * Notice that, a successful shutdown call may end with an error since the remote server has
   * already been shut down.
   * @throws IOException if a remote or network exception occurs
   */
  void shutdown() throws IOException;

  /**
   * Shuts down the current HBase master only. Does not shutdown the cluster.
   * <p>
   * Notice that, a successful stopMaster call may end with an error since the remote server has
   * already been shut down.
   * @throws IOException if a remote or network exception occurs
   */
  void stopMaster() throws IOException;

  /**
   * Check whether Master is in maintenance mode.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isMasterInMaintenanceMode() throws IOException;

  /**
   * Stop the designated regionserver.
   * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
   *          <code>example.org:1234</code>
   * @throws IOException if a remote or network exception occurs
   */
  void stopRegionServer(String hostnamePort) throws IOException;

  /**
   * Get whole cluster metrics, containing status about:
   * <pre>
   * primary/backup master(s)
   * master's coprocessors
   * live/dead regionservers
   * regions in transition
   * </pre>
   * @return cluster metrics
   * @throws IOException if a remote or network exception occurs
   */
  default ClusterMetrics getClusterMetrics() throws IOException {
    return getClusterMetrics(EnumSet.allOf(ClusterMetrics.Option.class));
  }

  /**
   * Get cluster status with a set of {@link Option} to get desired status.
   * @return cluster status
   * @throws IOException if a remote or network exception occurs
   */
  ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException;
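
  // Illustrative sketch (not part of the interface): asking only for the pieces of cluster
  // status that are needed, which is cheaper than fetching everything. Assumes an Admin "admin";
  // the chosen options are just an example.
  //
  //   ClusterMetrics metrics =
  //     admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
  //   System.out.println("Active master: " + metrics.getMasterName());
  //   System.out.println("Live servers: " + metrics.getLiveServerMetrics().size());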

  /**
   * @return current master server name
   * @throws IOException if a remote or network exception occurs
   */
  default ServerName getMaster() throws IOException {
    return getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
  }

  /**
   * @return current backup master list
   * @throws IOException if a remote or network exception occurs
   */
  default Collection<ServerName> getBackupMasters() throws IOException {
    return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)).getBackupMasterNames();
  }

  /**
   * @return current live region servers list
   * @throws IOException if a remote or network exception occurs
   */
  default Collection<ServerName> getRegionServers() throws IOException {
    return getRegionServers(false);
  }

  /**
   * Retrieve all current live region servers including decommissioned if
   * excludeDecommissionedRS is false, else non-decommissioned ones only.
   * @param excludeDecommissionedRS should we exclude decommissioned RS nodes
   * @return all current live region servers including/excluding decommissioned hosts
   * @throws IOException if a remote or network exception occurs
   */
  default Collection<ServerName> getRegionServers(boolean excludeDecommissionedRS)
    throws IOException {
    List<ServerName> allServers =
      getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName();
    if (!excludeDecommissionedRS) {
      return allServers;
    }
    List<ServerName> decommissionedRegionServers = listDecommissionedRegionServers();
    return allServers.stream()
      .filter(s -> !decommissionedRegionServers.contains(s))
      .collect(ImmutableList.toImmutableList());
  }

  /**
   * Get {@link RegionMetrics} of all regions hosted on a regionserver.
   * @param serverName region server from which {@link RegionMetrics} is required.
   * @return a {@link RegionMetrics} list of all regions hosted on a region server
   * @throws IOException if a remote or network exception occurs
   */
  List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException;

  /**
   * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.
   * @param serverName region server from which {@link RegionMetrics} is required.
   * @param tableName get {@link RegionMetrics} of regions belonging to the table
   * @return region metrics map of all regions of a table hosted on a region server
   * @throws IOException if a remote or network exception occurs
   */
  List<RegionMetrics> getRegionMetrics(ServerName serverName, TableName tableName)
    throws IOException;

  /**
   * @return Configuration used by the instance.
   */
  Configuration getConfiguration();

  /**
   * Create a new namespace. Blocks until namespace has been successfully created or an exception
   * is thrown.
   * @param descriptor descriptor which describes the new namespace.
   * @throws IOException if a remote or network exception occurs
   */
  default void createNamespace(NamespaceDescriptor descriptor) throws IOException {
    get(createNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Create a new namespace.
   * @param descriptor descriptor which describes the new namespace
   * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
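
  // Illustrative sketch (not part of the interface): creating a namespace and then a table
  // inside it. Assumes an Admin "admin"; the namespace and table names are placeholders.
  //
  //   admin.createNamespace(NamespaceDescriptor.create("analytics").build());
  //   TableDescriptor td = TableDescriptorBuilder
  //     .newBuilder(TableName.valueOf("analytics", "events"))
  //     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d"))
  //     .build();
  //   admin.createTable(td);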

  /**
   * Modify an existing namespace. Blocks until namespace has been successfully modified or an
   * exception is thrown.
   * @param descriptor descriptor which describes the new namespace
   * @throws IOException if a remote or network exception occurs
   */
  default void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
    get(modifyNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Modify an existing namespace.
   * @param descriptor descriptor which describes the new namespace
   * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;

  /**
   * Delete an existing namespace. Only empty namespaces (no tables) can be removed. Blocks until
   * namespace has been successfully deleted or an exception is thrown.
   * @param name namespace name
   * @throws IOException if a remote or network exception occurs
   */
  default void deleteNamespace(String name) throws IOException {
    get(deleteNamespaceAsync(name), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
   * @param name namespace name
   * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> deleteNamespaceAsync(String name) throws IOException;

  /**
   * Get a namespace descriptor by name.
   * @param name name of namespace descriptor
   * @return A descriptor
   * @throws org.apache.hadoop.hbase.NamespaceNotFoundException
   * @throws IOException if a remote or network exception occurs
   */
  NamespaceDescriptor getNamespaceDescriptor(String name)
    throws NamespaceNotFoundException, IOException;

  /**
   * List available namespaces
   * @return List of namespace names
   * @throws IOException if a remote or network exception occurs
   */
  String[] listNamespaces() throws IOException;

  /**
   * List available namespace descriptors
   * @return List of descriptors
   * @throws IOException if a remote or network exception occurs
   */
  NamespaceDescriptor[] listNamespaceDescriptors() throws IOException;

  /**
   * Get list of table descriptors by namespace.
   * @param name namespace name
   * @return returns a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException;

  /**
   * Get list of table names by namespace.
   * @param name namespace name
   * @return The list of table names in the namespace
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNamesByNamespace(String name) throws IOException;

  /**
   * Get the regions of a given table.
   * @param tableName the name of the table
   * @return List of {@link RegionInfo}.
   * @throws IOException if a remote or network exception occurs
   */
  List<RegionInfo> getRegions(TableName tableName) throws IOException;

  /**
   * Get tableDescriptors.
   * @param tableNames List of table names
   * @return returns a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException;

  /**
   * Abort a procedure.
   * <p>
   * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
   * @param procId ID of the procedure to abort
   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or
   *         does not exist
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.1.1 and will be removed in 4.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21223">HBASE-21223</a>
   */
  @Deprecated
  default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throws IOException {
    return get(abortProcedureAsync(procId, mayInterruptIfRunning), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Abort a procedure but does not block and wait for completion. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete. Do not use.
   * Usually it is ignored but if not, it can do more damage than good. See hbck2.
   * @param procId ID of the procedure to abort
   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
   * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or
   *         does not exist
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.1.1 and will be removed in 4.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21223">HBASE-21223</a>
   */
  @Deprecated
  Future<Boolean> abortProcedureAsync(long procId, boolean mayInterruptIfRunning)
    throws IOException;

  /**
   * @return procedure list in JSON
   * @throws IOException if a remote or network exception occurs
   */
  String getProcedures() throws IOException;

  /**
   * @return lock list in JSON
   * @throws IOException if a remote or network exception occurs
   */
  String getLocks() throws IOException;

  /**
   * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new
   * file.
   * <p>
   * Note that the actual rolling of the log writer is asynchronous and may not be complete when
   * this method returns. As a side effect of this call, the named region server may schedule
   * store flushes at the request of the wal.
   * @param serverName The servername of the regionserver.
   * @throws IOException if a remote or network exception occurs
   * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
   */
  void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;
1377 * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames().
1378 * @return an array of master coprocessors
1379 * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
1381 default List
<String
> getMasterCoprocessorNames() throws IOException
{
1382 return getClusterMetrics(EnumSet
.of(Option
.MASTER_COPROCESSORS
))
1383 .getMasterCoprocessorNames();
  /**
   * Get the current compaction state of a table. It could be in a major compaction, a minor
   * compaction, both, or none.
   * @param tableName table to examine
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionState(TableName tableName) throws IOException;

  /**
   * Get the current compaction state of a table. It could be in a compaction, or none.
   * @param tableName table to examine
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionState(TableName tableName,
    CompactType compactType) throws IOException;

  /**
   * Get the current compaction state of a region. It could be in a major compaction, a minor
   * compaction, both, or none.
   * @param regionName region to examine
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException;
  /**
   * Get the timestamp of the last major compaction for the passed table.
   * <p/>
   * The timestamp of the oldest HFile resulting from a major compaction of that table,
   * or 0 if no such HFile could be found.
   * @param tableName table to examine
   * @return the last major compaction timestamp or 0
   * @throws IOException if a remote or network exception occurs
   */
  long getLastMajorCompactionTimestamp(TableName tableName) throws IOException;

  /**
   * Get the timestamp of the last major compaction for the passed region.
   * <p/>
   * The timestamp of the oldest HFile resulting from a major compaction of that region,
   * or 0 if no such HFile could be found.
   * @param regionName region to examine
   * @return the last major compaction timestamp or 0
   * @throws IOException if a remote or network exception occurs
   */
  long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
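
  /*
   * Example (illustrative sketch, not part of the upstream interface): polling the compaction
   * state of a table. Assumes an existing Connection named 'conn'; the table name is a
   * placeholder.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     // Ask the cluster what the table is currently doing.
   *     CompactionState state = admin.getCompactionState(TableName.valueOf("orders"));
   *     if (state == CompactionState.NONE) {
   *       System.out.println("no compaction running for orders");
   *     }
   *   }
   */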
  /**
   * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
   * taken. If the table is disabled, an offline snapshot is taken. Snapshots are taken
   * sequentially even when requested concurrently, across all tables. Snapshots are considered
   * unique based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same
   * name (even a different type or with different parameters) will fail with a
   * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
   * naming. Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  default void snapshot(String snapshotName, TableName tableName)
    throws IOException, SnapshotCreationException, IllegalArgumentException {
    snapshot(snapshotName, tableName, SnapshotType.FLUSH);
  }
  /**
   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of
   * the snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across
   * all tables. Attempts to take a snapshot with the same name (even a different type or with
   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
   *          snapshots stored on the cluster
   * @param tableName name of the table to snapshot
   * @param type type of snapshot to take
   * @throws IOException if we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  default void snapshot(String snapshotName, TableName tableName, SnapshotType type)
    throws IOException, SnapshotCreationException, IllegalArgumentException {
    snapshot(new SnapshotDescription(snapshotName, tableName, type));
  }
  /**
   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of
   * the snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across
   * all tables. Attempts to take a snapshot with the same name (even a different type or with
   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   * The snapshot can be given a time-to-live (TTL), in seconds, via the additional properties.
   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
   *          snapshots stored on the cluster
   * @param tableName name of the table to snapshot
   * @param type type of snapshot to take
   * @param snapshotProps snapshot additional properties e.g. TTL
   * @throws IOException if we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  default void snapshot(String snapshotName, TableName tableName, SnapshotType type,
    Map<String, Object> snapshotProps) throws IOException,
    SnapshotCreationException, IllegalArgumentException {
    snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps));
  }
  /**
   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of
   * the snapshot</b>. Snapshots are taken sequentially even when requested concurrently, across
   * all tables. Attempts to take a snapshot with the same name (even a different type or with
   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   * The snapshot can be given a time-to-live (TTL), in seconds, via the additional properties.
   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
   *          snapshots stored on the cluster
   * @param tableName name of the table to snapshot
   * @param snapshotProps snapshot additional properties e.g. TTL
   * @throws IOException if we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  default void snapshot(String snapshotName, TableName tableName,
    Map<String, Object> snapshotProps) throws IOException,
    SnapshotCreationException, IllegalArgumentException {
    snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps));
  }
  /**
   * Take a snapshot and wait for the server to complete that snapshot (blocking). Snapshots are
   * considered unique based on <b>the name of the snapshot</b>. Snapshots are taken sequentially
   * even when requested concurrently, across all tables. Attempts to take a snapshot with the same
   * name (even a different type or with different parameters) will fail with a
   * {@link SnapshotCreationException} indicating the duplicate naming. Snapshot names follow the
   * same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should
   * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure
   * about the type of snapshot that you want to take.
   * @param snapshot snapshot to take
   * @throws IOException if we lose contact with the master
   * @throws SnapshotCreationException if snapshot failed to be taken
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void snapshot(SnapshotDescription snapshot)
    throws IOException, SnapshotCreationException, IllegalArgumentException;
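
  /*
   * Example (illustrative sketch, not part of the upstream interface): taking a flush-type
   * snapshot of an enabled table, first with the simple overload and then with the type spelled
   * out. Assumes an existing Connection named 'conn'; table and snapshot names are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     // Defaults to SnapshotType.FLUSH when the table is enabled.
   *     admin.snapshot("orders_snap_1", TableName.valueOf("orders"));
   *     // Equivalent call with an explicit snapshot type.
   *     admin.snapshot("orders_snap_2", TableName.valueOf("orders"), SnapshotType.FLUSH);
   *   }
   */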
  /**
   * Take a snapshot without waiting for the server to complete that snapshot (asynchronous).
   * Snapshots are considered unique based on <b>the name of the snapshot</b>. Snapshots are taken
   * sequentially even when requested concurrently, across all tables.
   * @param snapshot snapshot to take
   * @throws IOException if the snapshot did not succeed or we lose contact with the master.
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  Future<Void> snapshotAsync(SnapshotDescription snapshot)
    throws IOException, SnapshotCreationException;
  /**
   * Check the current state of the passed snapshot. There are three possible states: <ol>
   * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
   * <li>finished with error - throws the exception that caused the snapshot to fail</li> </ol> The
   * cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
   * run/started since the snapshot you are checking, you will receive an {@link
   * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
   * @param snapshot description of the snapshot to check
   * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
   *         running
   * @throws IOException if we have a network issue
   * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed
   * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
   *           unknown
   */
  boolean isSnapshotFinished(SnapshotDescription snapshot)
    throws IOException, HBaseSnapshotException, UnknownSnapshotException;
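
  /*
   * Example (illustrative sketch, not part of the upstream interface): submitting a snapshot
   * asynchronously and polling for completion. Assumes an existing Connection named 'conn';
   * names are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     SnapshotDescription desc =
   *       new SnapshotDescription("orders_snap", TableName.valueOf("orders"), SnapshotType.FLUSH);
   *     admin.snapshotAsync(desc);
   *     while (!admin.isSnapshotFinished(desc)) {
   *       Thread.sleep(1000); // a failed snapshot surfaces here as an exception
   *     }
   *   }
   */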
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled) If the
   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
   * <code>true</code>, a snapshot of the current table is taken before executing the restore
   * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
   * completes without problem the failsafe snapshot is deleted.
   * @param snapshotName name of the snapshot to restore
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException;
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled) If
   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
   * failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
    throws IOException, RestoreSnapshotException {
    restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
  }
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled) If
   * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
   * before executing the restore operation. In case of restore failure, the failsafe snapshot will
   * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
   * failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
   * @param restoreAcl <code>true</code> to restore acl of snapshot
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
    throws IOException, RestoreSnapshotException;
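
  /*
   * Example (illustrative sketch, not part of the upstream interface): restoring a snapshot onto
   * its original table with a failsafe snapshot taken first. Assumes an existing Connection named
   * 'conn' and that disableTable/enableTable are available on Admin; names are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     admin.disableTable(TableName.valueOf("orders"));   // the table must be disabled
   *     admin.restoreSnapshot("orders_snap", true);        // take a failsafe snapshot first
   *     admin.enableTable(TableName.valueOf("orders"));
   *   }
   */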
  /**
   * Create a new table by cloning the snapshot content.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  default void cloneSnapshot(String snapshotName, TableName tableName)
    throws IOException, TableExistsException, RestoreSnapshotException {
    cloneSnapshot(snapshotName, tableName, false, null);
  }
  /**
   * Create a new table by cloning the snapshot content.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @param restoreAcl <code>true</code> to clone acl into newly created table
   * @param customSFT specify the StoreFileTracker used for the table
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl,
    String customSFT)
    throws IOException, TableExistsException, RestoreSnapshotException {
    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }
  /**
   * Create a new table by cloning the snapshot content.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @param restoreAcl <code>true</code> to clone acl into newly created table
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
    throws IOException, TableExistsException, RestoreSnapshotException {
    get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }
  /**
   * Create a new table by cloning the snapshot content, but does not block and wait for it to be
   * completely cloned. You can use Future.get(long, TimeUnit) to wait on the operation to
   * complete. It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the operation to
   * complete.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be cloned already exists
   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait
   *         on the operation to complete.
   */
  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName)
    throws IOException, TableExistsException {
    return cloneSnapshotAsync(snapshotName, tableName, false);
  }
  /**
   * Create a new table by cloning the snapshot content.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @param restoreAcl <code>true</code> to clone acl into newly created table
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName,
    boolean restoreAcl)
    throws IOException, TableExistsException, RestoreSnapshotException {
    return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null);
  }
  /**
   * Create a new table by cloning the snapshot content.
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @param restoreAcl <code>true</code> to clone acl into newly created table
   * @param customSFT specify the StoreFileTracker used for the table
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl,
    String customSFT) throws IOException, TableExistsException, RestoreSnapshotException;
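
  /*
   * Example (illustrative sketch, not part of the upstream interface): cloning a snapshot into a
   * brand new table without blocking, then waiting on the returned future. Assumes an existing
   * Connection named 'conn'; names and the timeout are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Future<Void> f =
   *       admin.cloneSnapshotAsync("orders_snap", TableName.valueOf("orders_copy"));
   *     f.get(5, TimeUnit.MINUTES); // TimeoutException if the clone takes longer
   *   }
   */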
  /**
   * Execute a distributed procedure on a cluster.
   * @param signature A distributed procedure is uniquely identified by its signature (default the
   *          root ZK node name of the procedure).
   * @param instance The instance name of the procedure. For some procedures, this parameter is
   *          optional.
   * @param props Property/Value pairs of properties passing to the procedure
   * @throws IOException if a remote or network exception occurs
   */
  void execProcedure(String signature, String instance, Map<String, String> props)
    throws IOException;

  /**
   * Execute a distributed procedure on a cluster.
   * @param signature A distributed procedure is uniquely identified by its signature (default the
   *          root ZK node name of the procedure).
   * @param instance The instance name of the procedure. For some procedures, this parameter is
   *          optional.
   * @param props Property/Value pairs of properties passing to the procedure
   * @return data returned after procedure execution. null if no return data.
   * @throws IOException if a remote or network exception occurs
   */
  byte[] execProcedureWithReturn(String signature, String instance, Map<String, String> props)
    throws IOException;
  /**
   * Check the current state of the specified procedure. There are three possible states: <ol>
   * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
   * <li>finished with error - throws the exception that caused the procedure to fail</li> </ol>
   * @param signature The signature that uniquely identifies a procedure
   * @param instance The instance name of the procedure
   * @param props Property/Value pairs of properties passing to the procedure
   * @return <code>true</code> if the specified procedure is finished successfully,
   *         <code>false</code> if it is still running
   * @throws IOException if the specified procedure finished with error
   */
  boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
    throws IOException;
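
  /*
   * Example (illustrative sketch, not part of the upstream interface): running a distributed
   * procedure and polling for completion. Assumes an existing Connection named 'conn'; the
   * signature and instance names below are hypothetical placeholders, since real values depend
   * on the procedures deployed on the cluster.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Map<String, String> props = new HashMap<>();
   *     admin.execProcedure("my-procedure-signature", "my-instance", props);
   *     while (!admin.isProcedureFinished("my-procedure-signature", "my-instance", props)) {
   *       Thread.sleep(1000); // a failed procedure surfaces here as an IOException
   *     }
   *   }
   */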
  /**
   * List completed snapshots.
   * @return a list of snapshot descriptors for completed snapshots
   * @throws IOException if a network error occurs
   */
  List<SnapshotDescription> listSnapshots() throws IOException;

  /**
   * List all the completed snapshots matching the given pattern.
   * @param pattern The compiled regular expression to match against
   * @return list of SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;

  /**
   * List all the completed snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   * @param tableNamePattern The compiled table name regular expression to match against
   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
   * @return list of completed SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
    Pattern snapshotNamePattern) throws IOException;
  /**
   * Delete an existing snapshot.
   * @param snapshotName name of the snapshot
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshot(String snapshotName) throws IOException;

  /**
   * Delete existing snapshots whose names match the pattern passed.
   * @param pattern pattern for names of the snapshot to match
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshots(Pattern pattern) throws IOException;

  /**
   * Delete all existing snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   * @param tableNamePattern The compiled table name regular expression to match against
   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
   * @throws IOException if a remote or network exception occurs
   */
  void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
    throws IOException;
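
  /*
   * Example (illustrative sketch, not part of the upstream interface): listing completed
   * snapshots whose names match a pattern and then deleting them. Assumes an existing Connection
   * named 'conn'; the pattern is a placeholder.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Pattern old = Pattern.compile("orders_snap_.*");
   *     for (SnapshotDescription d : admin.listSnapshots(old)) {
   *       System.out.println("will delete " + d.getName());
   *     }
   *     admin.deleteSnapshots(old);
   *   }
   */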
  /**
   * Apply the new quota settings.
   * @param quota the quota settings
   * @throws IOException if a remote or network exception occurs
   */
  void setQuota(QuotaSettings quota) throws IOException;

  /**
   * List the quotas based on the filter.
   * @param filter the quota settings filter
   * @return the QuotaSettings list
   * @throws IOException if a remote or network exception occurs
   */
  List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException;
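
  /*
   * Example (illustrative sketch, not part of the upstream interface): applying a simple request
   * throttle and reading it back. Assumes an existing Connection named 'conn' and the
   * QuotaSettingsFactory / ThrottleType helpers from the quotas package; names and limits are
   * placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     admin.setQuota(QuotaSettingsFactory.throttleTable(TableName.valueOf("orders"),
   *       ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
   *     for (QuotaSettings q : admin.getQuota(new QuotaFilter().setTableFilter("orders"))) {
   *       System.out.println(q);
   *     }
   *   }
   */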
  /**
   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
   * instance connected to the active master.
   * <p/>
   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
   * used to access a published coprocessor
   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
   * invocations:
   * <p/>
   * <div style="background-color: #cccccc; padding: 2px">
   * <blockquote><pre>
   * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   * @return A MasterCoprocessorRpcChannel instance
   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level, please stop using it
   *             any more. Use the coprocessorService methods in {@link AsyncAdmin} instead.
   */
  @Deprecated
  CoprocessorRpcChannel coprocessorService();

  /**
   * Creates and returns a {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel}
   * instance connected to the passed region server.
   * <p/>
   * The obtained {@link org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel} instance can be
   * used to access a published coprocessor
   * {@link org.apache.hbase.thirdparty.com.google.protobuf.Service} using standard protobuf service
   * invocations:
   * <p/>
   * <div style="background-color: #cccccc; padding: 2px"> <blockquote><pre>
   * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   * @param serverName the server name to which the endpoint call is made
   * @return A RegionServerCoprocessorRpcChannel instance
   * @deprecated since 3.0.0, will be removed in 4.0.0. This is too low level, please stop using it
   *             any more. Use the coprocessorService methods in {@link AsyncAdmin} instead.
   */
  @Deprecated
  CoprocessorRpcChannel coprocessorService(ServerName serverName);
  /**
   * Update the configuration and trigger an online config change
   * on the regionserver.
   * @param server The server whose config needs to be updated.
   * @throws IOException if a remote or network exception occurs
   */
  void updateConfiguration(ServerName server) throws IOException;

  /**
   * Update the configuration and trigger an online config change
   * on all the regionservers.
   * @throws IOException if a remote or network exception occurs
   */
  void updateConfiguration() throws IOException;

  /**
   * Update the configuration and trigger an online config change
   * on all the regionservers in the RSGroup.
   * @param groupName the group name
   * @throws IOException if a remote or network exception occurs
   */
  void updateConfiguration(String groupName) throws IOException;
  /**
   * Get the info port of the current master if one is available.
   * @return master info port
   * @throws IOException if a remote or network exception occurs
   */
  default int getMasterInfoPort() throws IOException {
    return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).getMasterInfoPort();
  }

  /**
   * Return the set of supported security capabilities.
   * @throws IOException if a remote or network exception occurs
   * @throws UnsupportedOperationException
   */
  List<SecurityCapability> getSecurityCapabilities() throws IOException;
  /**
   * Turn the split switch on or off.
   * @param enabled enabled or not
   * @param synchronous If <code>true</code>, it waits until current split() call, if outstanding,
   *          to return.
   * @return Previous switch value
   * @throws IOException if a remote or network exception occurs
   */
  boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException;

  /**
   * Turn the merge switch on or off.
   * @param enabled enabled or not
   * @param synchronous If <code>true</code>, it waits until current merge() call, if outstanding,
   *          to return.
   * @return Previous switch value
   * @throws IOException if a remote or network exception occurs
   */
  boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException;

  /**
   * Query the current state of the split switch.
   * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isSplitEnabled() throws IOException;

  /**
   * Query the current state of the merge switch.
   * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isMergeEnabled() throws IOException;
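
  /*
   * Example (illustrative sketch, not part of the upstream interface): disabling region splits
   * and merges around a maintenance window and restoring the previous switch values afterwards.
   * Assumes an existing Connection named 'conn'.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     boolean prevSplit = admin.splitSwitch(false, true); // wait for outstanding splits
   *     boolean prevMerge = admin.mergeSwitch(false, true); // wait for outstanding merges
   *     // ... perform maintenance ...
   *     admin.splitSwitch(prevSplit, false);
   *     admin.mergeSwitch(prevMerge, false);
   *   }
   */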
  /**
   * Add a new replication peer for replicating data to slave cluster.
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication peer
   * @throws IOException if a remote or network exception occurs
   */
  default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
    throws IOException {
    addReplicationPeer(peerId, peerConfig, true);
  }

  /**
   * Add a new replication peer for replicating data to slave cluster.
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication peer
   * @param enabled peer state, true if ENABLED and false if DISABLED
   * @throws IOException if a remote or network exception occurs
   */
  default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
    throws IOException {
    get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Add a new replication peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication peer
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  default Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig)
    throws IOException {
    return addReplicationPeerAsync(peerId, peerConfig, true);
  }

  /**
   * Add a new replication peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication peer
   * @param enabled peer state, true if ENABLED and false if DISABLED
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
    boolean enabled) throws IOException;
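
  /*
   * Example (illustrative sketch, not part of the upstream interface): registering a replication
   * peer in the DISABLED state and enabling it later. Assumes an existing Connection named
   * 'conn' and the ReplicationPeerConfig builder API; the peer id and cluster key are
   * placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
   *       .setClusterKey("zk1,zk2,zk3:2181:/hbase")
   *       .build();
   *     admin.addReplicationPeer("peer_1", peerConfig, false); // created DISABLED
   *     admin.enableReplicationPeer("peer_1");
   *   }
   */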
  /**
   * Remove a peer and stop the replication.
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void removeReplicationPeer(String peerId) throws IOException {
    get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Remove a replication peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> removeReplicationPeerAsync(String peerId) throws IOException;

  /**
   * Restart the replication stream to the specified peer.
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void enableReplicationPeer(String peerId) throws IOException {
    get(enableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Enable a replication peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> enableReplicationPeerAsync(String peerId) throws IOException;

  /**
   * Stop the replication stream to the specified peer.
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void disableReplicationPeer(String peerId) throws IOException {
    get(disableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
  }

  /**
   * Disable a replication peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> disableReplicationPeerAsync(String peerId) throws IOException;

  /**
   * Returns the configured ReplicationPeerConfig for the specified peer.
   * @param peerId a short name that identifies the peer
   * @return ReplicationPeerConfig for the peer
   * @throws IOException if a remote or network exception occurs
   */
  ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException;
  /**
   * Update the peerConfig for the specified peer.
   * @param peerId a short name that identifies the peer
   * @param peerConfig new config for the replication peer
   * @throws IOException if a remote or network exception occurs
   */
  default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
    throws IOException {
    get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Update the peerConfig for the specified peer but does not block and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @param peerConfig new config for the replication peer
   * @return the result of the async operation
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig)
    throws IOException;
  /**
   * Append the replicable table-column family config for the specified peer.
   * @param id a short name that identifies the cluster
   * @param tableCfs A map from tableName to column family names
   * @throws ReplicationException if tableCfs has conflict with existing config
   * @throws IOException if a remote or network exception occurs
   */
  default void appendReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
    throws ReplicationException, IOException {
    if (tableCfs == null) {
      throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    ReplicationPeerConfig newPeerConfig =
      ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
    updateReplicationPeerConfig(id, newPeerConfig);
  }

  /**
   * Remove some table-cfs from config of the specified peer.
   * @param id a short name that identifies the cluster
   * @param tableCfs A map from tableName to column family names
   * @throws ReplicationException if tableCfs has conflict with existing config
   * @throws IOException if a remote or network exception occurs
   */
  default void removeReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
    throws ReplicationException, IOException {
    if (tableCfs == null) {
      throw new ReplicationException("tableCfs is null");
    }
    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
    ReplicationPeerConfig newPeerConfig =
      ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
    updateReplicationPeerConfig(id, newPeerConfig);
  }
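
  /*
   * Example (illustrative sketch, not part of the upstream interface): adding one table (all
   * column families) and one table restricted to a single family to an existing peer's
   * replicable set, then removing the first again. Assumes an existing Connection named 'conn'
   * and that a null family list means "all families"; peer id, table and family names are
   * placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Map<TableName, List<String>> tableCfs = new HashMap<>();
   *     tableCfs.put(TableName.valueOf("orders"), null);                // all families
   *     tableCfs.put(TableName.valueOf("users"), Arrays.asList("cf1")); // one family only
   *     admin.appendReplicationPeerTableCFs("peer_1", tableCfs);
   *
   *     Map<TableName, List<String>> toRemove = new HashMap<>();
   *     toRemove.put(TableName.valueOf("orders"), null);
   *     admin.removeReplicationPeerTableCFs("peer_1", toRemove);
   *   }
   */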
  /**
   * Return a list of replication peers.
   * @return a list of replication peers description
   * @throws IOException if a remote or network exception occurs
   */
  List<ReplicationPeerDescription> listReplicationPeers() throws IOException;

  /**
   * Return a list of replication peers.
   * @param pattern The compiled regular expression to match peer id
   * @return a list of replication peers description
   * @throws IOException if a remote or network exception occurs
   */
  List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
  /**
   * Transit current cluster to a new state in a synchronous replication peer.
   * @param peerId a short name that identifies the peer
   * @param state a new state of current cluster
   * @throws IOException if a remote or network exception occurs
   */
  default void transitReplicationPeerSyncReplicationState(String peerId,
    SyncReplicationState state) throws IOException {
    get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), getSyncWaitTimeout(),
      TimeUnit.MILLISECONDS);
  }

  /**
   * Transit current cluster to a new state in a synchronous replication peer. But does not block
   * and wait for it.
   * <p/>
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException in
   * case the wait timeout was not long enough to allow the operation to complete.
   * @param peerId a short name that identifies the peer
   * @param state a new state of current cluster
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
    SyncReplicationState state) throws IOException;
  /**
   * Get the current cluster state in a synchronous replication peer.
   * @param peerId a short name that identifies the peer
   * @return the current cluster state
   * @throws IOException if a remote or network exception occurs
   */
  default SyncReplicationState getReplicationPeerSyncReplicationState(String peerId)
    throws IOException {
    List<ReplicationPeerDescription> peers = listReplicationPeers(Pattern.compile(peerId));
    if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) {
      throw new IOException("Replication peer " + peerId + " does not exist");
    }
    return peers.get(0).getSyncReplicationState();
  }
  /**
   * Mark region server(s) as decommissioned to prevent additional regions from getting
   * assigned to them. Optionally unload the regions on the servers. If there are multiple servers
   * to be decommissioned, decommissioning them at the same time can prevent wasteful region
   * movements. Region unloading is asynchronous.
   * @param servers The list of servers to decommission.
   * @param offload True to offload the regions from the decommissioned servers
   * @throws IOException if a remote or network exception occurs
   */
  void decommissionRegionServers(List<ServerName> servers, boolean offload) throws IOException;

  /**
   * List region servers marked as decommissioned, which can not be assigned regions.
   * @return List of decommissioned region servers.
   * @throws IOException if a remote or network exception occurs
   */
  List<ServerName> listDecommissionedRegionServers() throws IOException;
  /**
   * Remove decommission marker from a region server to allow regions assignments.
   * Load regions onto the server if a list of regions is given. Region loading is
   * asynchronous.
   * @param server The server to recommission.
   * @param encodedRegionNames Regions to load onto the server.
   * @throws IOException if a remote or network exception occurs
   */
  void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
    throws IOException;
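
  /*
   * Example (illustrative sketch, not part of the upstream interface): decommissioning a region
   * server, offloading its regions, and later recommissioning it. Assumes an existing Connection
   * named 'conn'; the server name is a placeholder of the form host,port,startcode.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     ServerName rs = ServerName.valueOf("rs1.example.com,16020,1700000000000");
   *     admin.decommissionRegionServers(Collections.singletonList(rs), true); // offload regions
   *     System.out.println(admin.listDecommissionedRegionServers());
   *     // Recommission without preloading any specific regions.
   *     admin.recommissionRegionServer(rs, Collections.emptyList());
   *   }
   */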
  /**
   * Find all table and column families that are replicated from this cluster.
   * @return the replicated table-cfs list of this cluster.
   * @throws IOException if a remote or network exception occurs
   */
  List<TableCFs> listReplicatedTableCFs() throws IOException;

  /**
   * Enable a table's replication switch.
   * @param tableName name of the table
   * @throws IOException if a remote or network exception occurs
   */
  void enableTableReplication(TableName tableName) throws IOException;

  /**
   * Disable a table's replication switch.
   * @param tableName name of the table
   * @throws IOException if a remote or network exception occurs
   */
  void disableTableReplication(TableName tableName) throws IOException;

  /**
   * Clear compacting queues on a regionserver.
   * @param serverName the region server name
   * @param queues the set of queue name
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void clearCompactionQueues(ServerName serverName, Set<String> queues)
    throws IOException, InterruptedException;
  /**
   * List dead region servers.
   * @return List of dead region servers.
   * @throws IOException if a remote or network exception occurs
   */
  default List<ServerName> listDeadServers() throws IOException {
    return getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
  }

  /**
   * Clear dead region servers from master.
   * @param servers list of dead region servers.
   * @throws IOException if a remote or network exception occurs
   * @return List of servers that are not cleared
   */
  List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException;
  /**
   * Create a new table by cloning the existent table schema.
   * @param tableName name of the table to be cloned
   * @param newTableName name of the new table where the table will be created
   * @param preserveSplits True if the splits should be preserved
   * @throws IOException if a remote or network exception occurs
   */
  void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
    throws IOException;

  /**
   * Switch the rpc throttle enable state.
   * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
   * @return Previous rpc throttle enabled value
   * @throws IOException if a remote or network exception occurs
   */
  boolean switchRpcThrottle(boolean enable) throws IOException;

  /**
   * Get if the rpc throttle is enabled.
   * @return True if rpc throttle is enabled
   * @throws IOException if a remote or network exception occurs
   */
  boolean isRpcThrottleEnabled() throws IOException;
  /**
   * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota
   * can be exceeded if region server has available quota.
   * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
   * @return Previous exceed throttle enabled value
   * @throws IOException if a remote or network exception occurs
   */
  boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException;

  /**
   * Fetches the table sizes on the filesystem as tracked by the HBase Master.
   * @throws IOException if a remote or network exception occurs
   */
  Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException;
  /**
   * Fetches the {@link SpaceQuotaSnapshotView}s observed by a RegionServer.
   * @throws IOException if a remote or network exception occurs
   */
  Map<TableName, ? extends SpaceQuotaSnapshotView> getRegionServerSpaceQuotaSnapshots(
    ServerName serverName) throws IOException;

  /**
   * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has
   * no quota information on that namespace.
   * @throws IOException if a remote or network exception occurs
   */
  SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException;

  /**
   * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has
   * no quota information on that table.
   * @throws IOException if a remote or network exception occurs
   */
  SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException;
  /**
   * Grants user specific permissions.
   * @param userPermission user name and the specific permission
   * @param mergeExistingPermissions If set to false, later granted permissions will override
   *          previous granted permissions. Otherwise, it'll merge with previous granted
   *          permissions.
   * @throws IOException if a remote or network exception occurs
   */
  void grant(UserPermission userPermission, boolean mergeExistingPermissions) throws IOException;

  /**
   * Revokes user specific permissions.
   * @param userPermission user name and the specific permission
   * @throws IOException if a remote or network exception occurs
   */
  void revoke(UserPermission userPermission) throws IOException;
  /**
   * Get the global/namespace/table permissions for user.
   * @param getUserPermissionsRequest A request contains which user, global, namespace or table
   *          permissions needed
   * @return The user and permission list
   * @throws IOException if a remote or network exception occurs
   */
  List<UserPermission> getUserPermissions(GetUserPermissionsRequest getUserPermissionsRequest)
    throws IOException;

  /**
   * Check if the user has specific permissions.
   * @param userName the user name
   * @param permissions the specific permission list
   * @return a list of booleans indicating, for each permission, whether the user has it
   * @throws IOException if a remote or network exception occurs
   */
  List<Boolean> hasUserPermissions(String userName, List<Permission> permissions)
    throws IOException;

  /**
   * Check if the calling user has specific permissions.
   * @param permissions the specific permission list
   * @return a list of booleans indicating, for each permission, whether the user has it
   * @throws IOException if a remote or network exception occurs
   */
  default List<Boolean> hasUserPermissions(List<Permission> permissions) throws IOException {
    return hasUserPermissions(null, permissions);
  }
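
  /*
   * Example (illustrative sketch, not part of the upstream interface): checking whether a user
   * can read and write a table. Assumes an existing Connection named 'conn' and the
   * Permission.newBuilder API from the security package; user and table names are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Permission read = Permission.newBuilder(TableName.valueOf("orders"))
   *       .withActions(Permission.Action.READ).build();
   *     Permission write = Permission.newBuilder(TableName.valueOf("orders"))
   *       .withActions(Permission.Action.WRITE).build();
   *     List<Boolean> results = admin.hasUserPermissions("alice", Arrays.asList(read, write));
   *   }
   */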
  /**
   * Turn on or off the auto snapshot cleanup based on TTL.
   * @param on Set to <code>true</code> to enable, <code>false</code> to disable.
   * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
   *          if outstanding.
   * @return Previous auto snapshot cleanup value
   * @throws IOException if a remote or network exception occurs
   */
  boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous)
    throws IOException;

  /**
   * Query the current state of the auto snapshot cleanup based on TTL.
   * @return <code>true</code> if the auto snapshot cleanup is enabled,
   *         <code>false</code> otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isSnapshotCleanupEnabled() throws IOException;
  /**
   * Retrieves online slow/large RPC logs from the provided list of
   * RegionServers.
   * @param serverNames Server names to get slowlog responses from
   * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs)
   * @return online slowlog response list
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.4.0 and will be removed in 4.0.0.
   *             Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead.
   */
  @Deprecated
  default List<OnlineLogRecord> getSlowLogResponses(final Set<ServerName> serverNames,
    final LogQueryFilter logQueryFilter) throws IOException {
    String logType;
    if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) {
      logType = "LARGE_LOG";
    } else {
      logType = "SLOW_LOG";
    }
    Map<String, Object> filterParams = new HashMap<>();
    filterParams.put("regionName", logQueryFilter.getRegionName());
    filterParams.put("clientAddress", logQueryFilter.getClientAddress());
    filterParams.put("tableName", logQueryFilter.getTableName());
    filterParams.put("userName", logQueryFilter.getUserName());
    filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString());
    List<LogEntry> logEntries =
      getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(),
        filterParams);
    return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry)
      .collect(Collectors.toList());
  }

  /**
   * Clears online slow/large RPC logs from the provided list of
   * RegionServers.
   * @param serverNames Set of Server names to clean slowlog responses from
   * @return List of booleans representing if online slowlog response buffer is cleaned
   *         from each RegionServer
   * @throws IOException if a remote or network exception occurs
   */
  List<Boolean> clearSlowLogResponses(final Set<ServerName> serverNames)
    throws IOException;
  /**
   * Creates a new RegionServer group with the given name.
   * @param groupName the name of the group
   * @throws IOException if a remote or network exception occurs
   */
  void addRSGroup(String groupName) throws IOException;

  /**
   * Get group info for the given group name.
   * @param groupName the group name
   * @return group info
   * @throws IOException if a remote or network exception occurs
   */
  RSGroupInfo getRSGroup(String groupName) throws IOException;

  /**
   * Get group info for the given hostPort.
   * @param hostPort HostPort to get RSGroupInfo for
   * @throws IOException if a remote or network exception occurs
   */
  RSGroupInfo getRSGroup(Address hostPort) throws IOException;

  /**
   * Get group info for the given table.
   * @param tableName table name to get RSGroupInfo for
   * @throws IOException if a remote or network exception occurs
   */
  RSGroupInfo getRSGroup(TableName tableName) throws IOException;

  /**
   * Lists current set of RegionServer groups.
   * @throws IOException if a remote or network exception occurs
   */
  List<RSGroupInfo> listRSGroups() throws IOException;

  /**
   * Get all tables in this RegionServer group.
   * @param groupName the group name
   * @throws IOException if a remote or network exception occurs
   * @see #getConfiguredNamespacesAndTablesInRSGroup(String)
   */
  List<TableName> listTablesInRSGroup(String groupName) throws IOException;
  /**
   * Get the namespaces and tables which have this RegionServer group in descriptor.
   * <p/>
   * The difference between this method and {@link #listTablesInRSGroup(String)} is that, this
   * method will not include a table which is actually in this RegionServer group but without the
   * RegionServer group configuration in its {@link TableDescriptor}. For example, if we have a
   * group 'A' and we make namespace 'nsA' belong to this group, then all the tables under this
   * namespace will be in the group 'A', but this method will not return these tables, only the
   * namespace 'nsA', while {@link #listTablesInRSGroup(String)} will return all these tables.
   * @param groupName the group name
   * @throws IOException if a remote or network exception occurs
   * @see #listTablesInRSGroup(String)
   */
  Pair<List<String>, List<TableName>> getConfiguredNamespacesAndTablesInRSGroup(String groupName)
    throws IOException;
  /**
   * Remove RegionServer group associated with the given name.
   * @param groupName the group name
   * @throws IOException if a remote or network exception occurs
   */
  void removeRSGroup(String groupName) throws IOException;

  /**
   * Remove decommissioned servers from group.
   * 1. Sometimes we may find a server aborted due to some hardware failure and we must offline
   * the server for repairing, or we need to move some servers to join other clusters.
   * So we need to remove these servers from the group.
   * 2. Dead/recovering/live servers will be disallowed.
   * @param servers set of servers to remove
   * @throws IOException if a remote or network exception occurs
   */
  void removeServersFromRSGroup(Set<Address> servers) throws IOException;

  /**
   * Move given set of servers to the specified target RegionServer group.
   * @param servers set of servers to move
   * @param targetGroup the group to move servers to
   * @throws IOException if a remote or network exception occurs
   */
  void moveServersToRSGroup(Set<Address> servers, String targetGroup) throws IOException;

  /**
   * Set the RegionServer group for tables.
   * @param tables tables to set group for
   * @param groupName group name for tables
   * @throws IOException if a remote or network exception occurs
   */
  void setRSGroup(Set<TableName> tables, String groupName) throws IOException;
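
  /*
   * Example (illustrative sketch, not part of the upstream interface): creating a RegionServer
   * group, moving two servers into it, and pinning a table to it. Assumes an existing Connection
   * named 'conn' and the Address.fromParts helper; group, host and table names are placeholders.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     admin.addRSGroup("analytics");
   *     Set<Address> servers = new HashSet<>();
   *     servers.add(Address.fromParts("rs1.example.com", 16020));
   *     servers.add(Address.fromParts("rs2.example.com", 16020));
   *     admin.moveServersToRSGroup(servers, "analytics");
   *     admin.setRSGroup(Collections.singleton(TableName.valueOf("orders")), "analytics");
   *   }
   */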
  /**
   * Balance regions in the given RegionServer group.
   * @param groupName the group name
   * @return BalanceResponse details about the balancer run
   * @throws IOException if a remote or network exception occurs
   */
  default BalanceResponse balanceRSGroup(String groupName) throws IOException {
    return balanceRSGroup(groupName, BalanceRequest.defaultInstance());
  }

  /**
   * Balance regions in the given RegionServer group, running based on
   * the given {@link BalanceRequest}.
   * @return BalanceResponse details about the balancer run
   * @throws IOException if a remote or network exception occurs
   */
  BalanceResponse balanceRSGroup(String groupName, BalanceRequest request) throws IOException;

  /**
   * Rename the RegionServer group.
   * @param oldName old rsgroup name
   * @param newName new rsgroup name
   * @throws IOException if a remote or network exception occurs
   */
  void renameRSGroup(String oldName, String newName) throws IOException;
  /**
   * Update RSGroup configuration.
   * @param groupName the group name
   * @param configuration new configuration of the group name to be set
   * @throws IOException if a remote or network exception occurs
   */
  void updateRSGroupConfig(String groupName, Map<String, String> configuration) throws IOException;
  /**
   * Retrieve recent online records from HMaster / RegionServers.
   * Examples include slow/large RPC logs, balancer decisions by master.
   * @param serverNames servers to retrieve records from, useful in case of records maintained
   *          by RegionServer as we can select specific server. In case of servertype=MASTER, logs
   *          will only come from the currently active master.
   * @param logType string representing type of log records
   * @param serverType enum for server type: HMaster or RegionServer
   * @param limit put a limit to list of records that server should send in response
   * @param filterParams additional filter params
   * @return Log entries representing online records from servers
   * @throws IOException if a remote or network exception occurs
   */
  List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType,
    ServerType serverType, int limit, Map<String, Object> filterParams) throws IOException;
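
  /*
   * Example (illustrative sketch, not part of the upstream interface): fetching the most recent
   * slow RPC records from two region servers. Assumes an existing Connection named 'conn'; the
   * server names are placeholders and the "SLOW_LOG" log type string mirrors the one used by the
   * deprecated getSlowLogResponses default method above.
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     Set<ServerName> servers = new HashSet<>();
   *     servers.add(ServerName.valueOf("rs1.example.com,16020,1700000000000"));
   *     servers.add(ServerName.valueOf("rs2.example.com,16020,1700000000001"));
   *     List<LogEntry> entries =
   *       admin.getLogEntries(servers, "SLOW_LOG", ServerType.REGION_SERVER, 100, new HashMap<>());
   *     entries.forEach(e -> System.out.println(e));
   *   }
   */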