HBASE-22002 Remove the deprecated methods in Admin interface
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
1 /**
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 package org.apache.hadoop.hbase.client;
20 import static org.apache.hadoop.hbase.util.FutureUtils.get;
22 import java.io.Closeable;
23 import java.io.IOException;
24 import java.util.Collection;
25 import java.util.EnumSet;
26 import java.util.List;
27 import java.util.Map;
28 import java.util.Set;
29 import java.util.concurrent.Future;
30 import java.util.concurrent.TimeUnit;
31 import java.util.regex.Pattern;
32 import org.apache.hadoop.conf.Configuration;
33 import org.apache.hadoop.hbase.Abortable;
34 import org.apache.hadoop.hbase.CacheEvictionStats;
35 import org.apache.hadoop.hbase.ClusterMetrics;
36 import org.apache.hadoop.hbase.ClusterMetrics.Option;
37 import org.apache.hadoop.hbase.NamespaceDescriptor;
38 import org.apache.hadoop.hbase.NamespaceNotFoundException;
39 import org.apache.hadoop.hbase.RegionMetrics;
40 import org.apache.hadoop.hbase.ServerName;
41 import org.apache.hadoop.hbase.TableExistsException;
42 import org.apache.hadoop.hbase.TableName;
43 import org.apache.hadoop.hbase.TableNotFoundException;
44 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
45 import org.apache.hadoop.hbase.client.replication.TableCFs;
46 import org.apache.hadoop.hbase.client.security.SecurityCapability;
47 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
48 import org.apache.hadoop.hbase.quotas.QuotaFilter;
49 import org.apache.hadoop.hbase.quotas.QuotaSettings;
50 import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;
51 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
52 import org.apache.hadoop.hbase.replication.ReplicationException;
53 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
54 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
55 import org.apache.hadoop.hbase.replication.SyncReplicationState;
56 import org.apache.hadoop.hbase.security.access.Permission;
57 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
58 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
59 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
60 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
61 import org.apache.yetus.audience.InterfaceAudience;
63 /**
64 * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and
65 * call {@link #close()} when done.
66 * <p>Admin can be used to create, drop, list, enable and disable and otherwise modify tables,
67 * as well as perform other administrative operations.
69 * @see ConnectionFactory
70 * @see Connection
71 * @see Table
72 * @since 0.99.0
74 @InterfaceAudience.Public
75 public interface Admin extends Abortable, Closeable {
77 /**
78 * Return the operation timeout for an RPC call.
79 * @see #getSyncWaitTimeout()
81 int getOperationTimeout();
83 /**
84 * Return the blocking wait time for an asynchronous operation. Can be configured by
85 * {@code hbase.client.sync.wait.timeout.msec}.
86 * <p/>
87 * For several operations, such as createTable, deleteTable, etc., the RPC call will finish right
88 * after we schedule a procedure at the master side, so the timeout will not be controlled by the
89 * above {@link #getOperationTimeout()}. The timeout value here tells you how long we will
90 * wait until the procedure at the master side is finished.
91 * <p/>
92 * In general, you can consider that the implementation of a XXXX method is just
93 * XXXXAsync().get(getSyncWaitTimeout(), TimeUnit.MILLISECONDS).
94 * @see #getOperationTimeout()
96 int getSyncWaitTimeout();
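/*
 * Illustrative sketch (not part of the interface): the "XXXX is XXXXAsync().get(...)" pattern
 * described above, spelled out for deleteTable. "admin" and "tableName" are assumed to already
 * exist.
 *
 *   // Roughly what the synchronous deleteTable(tableName) does:
 *   Future<Void> future = admin.deleteTableAsync(tableName);
 *   future.get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
 */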
98 @Override
99 void abort(String why, Throwable e);
101 @Override
102 boolean isAborted();
105 * @return Connection used by this object.
107 Connection getConnection();
110 * @param tableName Table to check.
111 * @return <code>true</code> if table exists already.
112 * @throws IOException
114 boolean tableExists(TableName tableName) throws IOException;
117 * List all the userspace tables.
119 * @return a list of TableDescriptors
120 * @throws IOException if a remote or network exception occurs
122 List<TableDescriptor> listTableDescriptors() throws IOException;
125 * List all the userspace tables that match the given pattern.
127 * @param pattern The compiled regular expression to match against
128 * @return a list of TableDescriptors
129 * @throws IOException if a remote or network exception occurs
130 * @see #listTableDescriptors()
132 default List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
133 return listTableDescriptors(pattern, false);
137 * List all the tables matching the given pattern.
139 * @param pattern The compiled regular expression to match against
140 * @param includeSysTables <code>false</code> to match only against userspace tables
141 * @return a list of TableDescriptors
142 * @throws IOException if a remote or network exception occurs
143 * @see #listTableDescriptors()
145 List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
146 throws IOException;
149 * List all of the names of userspace tables.
151 * @return TableName[] table names
152 * @throws IOException if a remote or network exception occurs
154 TableName[] listTableNames() throws IOException;
157 * List all of the names of userspace tables.
158 * @param pattern The regular expression to match against
159 * @return array of table names
160 * @throws IOException if a remote or network exception occurs
162 default TableName[] listTableNames(Pattern pattern) throws IOException {
163 return listTableNames(pattern, false);
167 * List all of the names of userspace tables.
168 * @param pattern The regular expression to match against
169 * @param includeSysTables <code>false</code> to match only against userspace tables
170 * @return TableName[] table names
171 * @throws IOException if a remote or network exception occurs
173 TableName[] listTableNames(Pattern pattern, boolean includeSysTables)
174 throws IOException;
177 * Get a table descriptor.
179 * @param tableName as a {@link TableName}
180 * @return the tableDescriptor
181 * @throws org.apache.hadoop.hbase.TableNotFoundException
182 * @throws IOException if a remote or network exception occurs
184 TableDescriptor getDescriptor(TableName tableName)
185 throws TableNotFoundException, IOException;
188 * Creates a new table. Synchronous operation.
190 * @param desc table descriptor for table
191 * @throws IllegalArgumentException if the table name is reserved
192 * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
193 * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
194 * threads, the table may have been created between test-for-existence and attempt-at-creation).
195 * @throws IOException if a remote or network exception occurs
197 void createTable(TableDescriptor desc) throws IOException;
200 * Creates a new table with the specified number of regions. The start key specified will become
201 * the end key of the first region of the table, and the end key specified will become the start
202 * key of the last region of the table (the first region has a null start key and the last region
203 * has a null end key). BigInteger math will be used to divide the key range specified into enough
204 * segments to make the required number of total regions. Synchronous operation.
206 * @param desc table descriptor for table
207 * @param startKey beginning of key range
208 * @param endKey end of key range
209 * @param numRegions the total number of regions to create
210 * @throws IllegalArgumentException if the table name is reserved
211 * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
212 * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
213 * threads, the table may have been created between test-for-existence and attempt-at-creation).
214 * @throws IOException
216 void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
217 throws IOException;
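/*
 * Usage sketch (illustrative only, assuming a Configuration "conf"): creating a table pre-split
 * into a fixed number of regions over a key range. The table and family names are hypothetical.
 *
 *   try (Connection conn = ConnectionFactory.createConnection(conf);
 *       Admin admin = conn.getAdmin()) {
 *     TableDescriptor desc = TableDescriptorBuilder
 *         .newBuilder(TableName.valueOf("example_table"))
 *         .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *         .build();
 *     admin.createTable(desc, Bytes.toBytes("0000"), Bytes.toBytes("ffff"), 16);
 *   }
 */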
220 * Creates a new table with an initial set of empty regions defined by the specified split keys.
221 * The total number of regions created will be the number of split keys plus one. Synchronous
222 * operation. Note: avoid passing an empty split key.
224 * @param desc table descriptor for table
225 * @param splitKeys array of split keys for the initial regions of the table
226 * @throws IllegalArgumentException if the table name is reserved, the split keys are repeated,
227 * or a split key is an empty byte array.
228 * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
229 * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
230 * threads, the table may have been created between test-for-existence and attempt-at-creation).
231 * @throws IOException
233 default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException {
234 get(createTableAsync(desc, splitKeys), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
238 * Creates a new table but does not block and wait for it to come online.
239 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
240 * It may throw ExecutionException if there was an error while executing the operation
241 * or TimeoutException in case the wait timeout was not long enough to allow the
242 * operation to complete.
243 * Throws IllegalArgumentException if the table name is bad, the split keys
244 * are repeated, or a split key is an empty byte array.
246 * @param desc table descriptor for table
247 * @param splitKeys keys to check if the table has been created with all split keys
248 * @throws IOException if a remote or network exception occurs
249 * @return the result of the async creation. You can use Future.get(long, TimeUnit)
250 * to wait on the operation to complete.
252 Future<Void> createTableAsync(TableDescriptor desc, byte[][] splitKeys)
253 throws IOException;
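/*
 * Sketch (assumes an existing Admin "admin" and TableDescriptor "desc"): creating a table with
 * explicit split keys and waiting on the returned Future.
 *
 *   byte[][] splitKeys = new byte[][] { Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("t") };
 *   Future<Void> f = admin.createTableAsync(desc, splitKeys);
 *   // May throw ExecutionException or TimeoutException, as documented above.
 *   f.get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
 */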
256 * Deletes a table. Synchronous operation.
257 * @param tableName name of table to delete
258 * @throws IOException if a remote or network exception occurs
260 default void deleteTable(TableName tableName) throws IOException {
261 get(deleteTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
265 * Deletes the table but does not block and wait for it to be completely removed.
266 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
267 * It may throw ExecutionException if there was an error while executing the operation
268 * or TimeoutException in case the wait timeout was not long enough to allow the
269 * operation to complete.
271 * @param tableName name of table to delete
272 * @throws IOException if a remote or network exception occurs
273 * @return the result of the async delete. You can use Future.get(long, TimeUnit)
274 * to wait on the operation to complete.
276 Future<Void> deleteTableAsync(TableName tableName) throws IOException;
279 * Truncate a table. Synchronous operation.
280 * @param tableName name of table to truncate
281 * @param preserveSplits <code>true</code> if the splits should be preserved
282 * @throws IOException if a remote or network exception occurs
284 default void truncateTable(TableName tableName, boolean preserveSplits) throws IOException {
285 get(truncateTableAsync(tableName, preserveSplits), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
289 * Truncate the table but does not block and wait for it to be completely truncated. You can use
290 * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
291 * ExecutionException if there was an error while executing the operation or TimeoutException in
292 * case the wait timeout was not long enough to allow the operation to complete.
293 * @param tableName name of table to truncate
294 * @param preserveSplits <code>true</code> if the splits should be preserved
295 * @throws IOException if a remote or network exception occurs
296 * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the
297 * operation to complete.
299 Future<Void> truncateTableAsync(TableName tableName, boolean preserveSplits)
300 throws IOException;
303 * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
304 * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
305 * disabled state for it to be enabled.
306 * @param tableName name of the table
307 * @throws IOException if a remote or network exception occurs. There could be a couple of types
308 * of IOException: TableNotFoundException means the table doesn't exist;
309 * TableNotDisabledException means the table isn't in the disabled state.
310 * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
311 * @see #disableTable(org.apache.hadoop.hbase.TableName)
312 * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
314 default void enableTable(TableName tableName) throws IOException {
315 get(enableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
319 * Enable the table but does not block and wait for it to be completely enabled.
320 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
321 * It may throw ExecutionException if there was an error while executing the operation
322 * or TimeoutException in case the wait timeout was not long enough to allow the
323 * operation to complete.
325 * @param tableName name of table to enable
326 * @throws IOException if a remote or network exception occurs
327 * @return the result of the async enable. You can use Future.get(long, TimeUnit)
328 * to wait on the operation to complete.
330 Future<Void> enableTableAsync(TableName tableName) throws IOException;
333 * Disable the table but does not block and wait for it to be completely disabled.
334 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
335 * It may throw ExecutionException if there was an error while executing the operation
336 * or TimeoutException in case the wait timeout was not long enough to allow the
337 * operation to complete.
339 * @param tableName name of table to disable
340 * @throws IOException if a remote or network exception occurs
341 * @return the result of the async disable. You can use Future.get(long, TimeUnit)
342 * to wait on the operation to complete.
344 Future<Void> disableTableAsync(TableName tableName) throws IOException;
347 * Disable table and wait on completion. May timeout eventually. Use
348 * {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
349 * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
350 * enabled state for it to be disabled.
351 * @param tableName name of the table
352 * @throws IOException if a remote or network exception occurs. TableNotFoundException means the
353 * table doesn't exist; TableNotEnabledException means the table isn't in the enabled state.
355 default void disableTable(TableName tableName) throws IOException {
356 get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
360 * @param tableName name of table to check
361 * @return <code>true</code> if table is on-line
362 * @throws IOException if a remote or network exception occurs
364 boolean isTableEnabled(TableName tableName) throws IOException;
367 * @param tableName name of table to check
368 * @return <code>true</code> if table is off-line
369 * @throws IOException if a remote or network exception occurs
371 boolean isTableDisabled(TableName tableName) throws IOException;
374 * @param tableName name of table to check
375 * @return <code>true</code> if all regions of the table are available
376 * @throws IOException if a remote or network exception occurs
378 boolean isTableAvailable(TableName tableName) throws IOException;
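/*
 * Sketch (illustrative; the table name is hypothetical): a defensive drop of a table using the
 * state checks above together with the disable/delete calls.
 *
 *   TableName tn = TableName.valueOf("example_table");
 *   if (admin.tableExists(tn)) {
 *     if (admin.isTableEnabled(tn)) {
 *       admin.disableTable(tn);
 *     }
 *     admin.deleteTable(tn);
 *   }
 */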
381 * Add a column family to an existing table. Synchronous operation. Use
382 * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a
383 * {@link Future} from which you can learn whether the operation succeeded or failed.
384 * @param tableName name of the table to add column family to
385 * @param columnFamily column family descriptor of column family to be added
386 * @throws IOException if a remote or network exception occurs
388 default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
389 throws IOException {
390 get(addColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
394 * Add a column family to an existing table. Asynchronous operation.
395 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
396 * It may throw ExecutionException if there was an error while executing the operation
397 * or TimeoutException in case the wait timeout was not long enough to allow the
398 * operation to complete.
400 * @param tableName name of the table to add column family to
401 * @param columnFamily column family descriptor of column family to be added
402 * @throws IOException if a remote or network exception occurs
403 * @return the result of the async add column family. You can use Future.get(long, TimeUnit) to
404 * wait on the operation to complete.
406 Future<Void> addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
407 throws IOException;
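/*
 * Sketch (assumes an existing Admin "admin"; table and family names are hypothetical): adding a
 * column family asynchronously and bounding the wait explicitly.
 *
 *   Future<Void> f = admin.addColumnFamilyAsync(TableName.valueOf("example_table"),
 *       ColumnFamilyDescriptorBuilder.of("new_cf"));
 *   f.get(admin.getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
 */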
410 * Delete a column family from a table. Synchronous operation. Use
411 * {@link #deleteColumnFamilyAsync(TableName, byte[])} instead because it returns a {@link Future}
412 * from which you can learn whether the operation succeeded or failed.
413 * @param tableName name of table
414 * @param columnFamily name of column family to be deleted
415 * @throws IOException if a remote or network exception occurs
417 default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws IOException {
418 get(deleteColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
419 TimeUnit.MILLISECONDS);
423 * Delete a column family from a table. Asynchronous operation.
424 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
425 * It may throw ExecutionException if there was an error while executing the operation
426 * or TimeoutException in case the wait timeout was not long enough to allow the
427 * operation to complete.
429 * @param tableName name of table
430 * @param columnFamily name of column family to be deleted
431 * @throws IOException if a remote or network exception occurs
432 * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to
433 * wait on the operation to complete.
435 Future<Void> deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily)
436 throws IOException;
439 * Modify an existing column family on a table. Synchronous operation. Use
440 * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns
441 * a {@link Future} from which you can learn whether the operation succeeded or failed.
442 * @param tableName name of table
443 * @param columnFamily new column family descriptor to use
444 * @throws IOException if a remote or network exception occurs
446 default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily)
447 throws IOException {
448 get(modifyColumnFamilyAsync(tableName, columnFamily), getSyncWaitTimeout(),
449 TimeUnit.MILLISECONDS);
453 * Modify an existing column family on a table. Asynchronous operation.
454 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
455 * It may throw ExecutionException if there was an error while executing the operation
456 * or TimeoutException in case the wait timeout was not long enough to allow the
457 * operation to complete.
459 * @param tableName name of table
460 * @param columnFamily new column family descriptor to use
461 * @throws IOException if a remote or network exception occurs
462 * @return the result of the async modify column family. You can use Future.get(long, TimeUnit) to
463 * wait on the operation to complete.
465 Future<Void> modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
466 throws IOException;
469 * Get all the online regions on a region server.
471 * @return List of {@link RegionInfo}
472 * @throws java.io.IOException
474 List<RegionInfo> getRegions(ServerName serverName) throws IOException;
477 * Flush a table. Synchronous operation.
479 * @param tableName table to flush
480 * @throws IOException if a remote or network exception occurs
482 void flush(TableName tableName) throws IOException;
485 * Flush an individual region. Synchronous operation.
487 * @param regionName region to flush
488 * @throws IOException if a remote or network exception occurs
490 void flushRegion(byte[] regionName) throws IOException;
493 * Flush all regions on the region server. Synchronous operation.
494 * @param serverName the region server name to flush
495 * @throws IOException if a remote or network exception occurs
497 void flushRegionServer(ServerName serverName) throws IOException;
500 * Compact a table. Asynchronous operation in that this method requests that a
501 * Compaction run and then it returns. It does not wait on the completion of Compaction
502 * (it can take a while).
504 * @param tableName table to compact
505 * @throws IOException if a remote or network exception occurs
507 void compact(TableName tableName) throws IOException;
510 * Compact an individual region. Asynchronous operation in that this method requests that a
511 * Compaction run and then it returns. It does not wait on the completion of Compaction
512 * (it can take a while).
514 * @param regionName region to compact
515 * @throws IOException if a remote or network exception occurs
517 void compactRegion(byte[] regionName) throws IOException;
520 * Compact a column family within a table. Asynchronous operation in that this method requests
521 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
522 * (it can take a while).
524 * @param tableName table to compact
525 * @param columnFamily column family within a table
526 * @throws IOException if a remote or network exception occurs
528 void compact(TableName tableName, byte[] columnFamily)
529 throws IOException;
532 * Compact a column family within a region. Asynchronous operation in that this method requests
533 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
534 * (it can take a while).
536 * @param regionName region to compact
537 * @param columnFamily column family within a region
538 * @throws IOException if a remote or network exception occurs
540 void compactRegion(byte[] regionName, byte[] columnFamily)
541 throws IOException;
544 * Compact a table. Asynchronous operation in that this method requests that a
545 * Compaction run and then it returns. It does not wait on the completion of Compaction
546 * (it can take a while).
548 * @param tableName table to compact
549 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
550 * @throws IOException if a remote or network exception occurs
551 * @throws InterruptedException
553 void compact(TableName tableName, CompactType compactType)
554 throws IOException, InterruptedException;
557 * Compact a column family within a table. Asynchronous operation in that this method
558 * requests that a Compaction run and then it returns. It does not wait on the
559 * completion of Compaction (it can take a while).
561 * @param tableName table to compact
562 * @param columnFamily column family within a table
563 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
564 * @throws IOException if not a mob column family or if a remote or network exception occurs
565 * @throws InterruptedException
567 void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
568 throws IOException, InterruptedException;
571 * Major compact a table. Asynchronous operation in that this method requests
572 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
573 * (it can take a while).
575 * @param tableName table to major compact
576 * @throws IOException if a remote or network exception occurs
578 void majorCompact(TableName tableName) throws IOException;
581 * Major compact an individual region. Asynchronous operation in that this method requests
582 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
583 * (it can take a while).
585 * @param regionName region to major compact
586 * @throws IOException if a remote or network exception occurs
588 void majorCompactRegion(byte[] regionName) throws IOException;
591 * Major compact a column family within a table. Asynchronous operation in that this method requests
592 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
593 * (it can take a while).
595 * @param tableName table to major compact
596 * @param columnFamily column family within a table
597 * @throws IOException if a remote or network exception occurs
599 void majorCompact(TableName tableName, byte[] columnFamily)
600 throws IOException;
603 * Major compact a column family within region. Asynchronous operation in that this method requests
604 * that a Compaction run and then it returns. It does not wait on the completion of Compaction
605 * (it can take a while).
607 * @param regionName region to major compact
608 * @param columnFamily column family within a region
609 * @throws IOException if a remote or network exception occurs
611 void majorCompactRegion(byte[] regionName, byte[] columnFamily)
612 throws IOException;
615 * Major compact a table. Asynchronous operation in that this method requests that a
616 * Compaction run and then it returns. It does not wait on the completion of Compaction
617 * (it can take a while).
619 * @param tableName table to compact
620 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
621 * @throws IOException if a remote or network exception occurs
622 * @throws InterruptedException
624 void majorCompact(TableName tableName, CompactType compactType)
625 throws IOException, InterruptedException;
628 * Major compact a column family within a table. Asynchronous operation in that this method requests that a
629 * Compaction run and then it returns. It does not wait on the completion of Compaction
630 * (it can take a while).
632 * @param tableName table to compact
633 * @param columnFamily column family within a table
634 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
635 * @throws IOException if not a mob column family or if a remote or network exception occurs
636 * @throws InterruptedException
638 void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
639 throws IOException, InterruptedException;
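/*
 * Sketch (illustrative; table name hypothetical): since compaction requests return immediately,
 * progress can be polled with getCompactionState, declared further below in this interface.
 *
 *   TableName tn = TableName.valueOf("example_table");
 *   admin.majorCompact(tn);
 *   while (admin.getCompactionState(tn) != CompactionState.NONE) {
 *     Thread.sleep(1000); // still compacting
 *   }
 */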
642 * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing
643 * compactions. This state is ephemeral. The setting will be lost on restart. Compaction
644 * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled
645 * in hbase-site.xml.
647 * @param switchState Set to <code>true</code> to enable, <code>false</code> to disable.
648 * @param serverNamesList list of region servers.
649 * @return Previous compaction states for region servers
651 Map<ServerName, Boolean> compactionSwitch(boolean switchState, List<String> serverNamesList)
652 throws IOException;
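/*
 * Sketch (illustrative): temporarily disabling compactions on specific region servers. The server
 * name string is hypothetical; the previous per-server states are returned.
 *
 *   List<String> servers = Arrays.asList("host187.example.com,60020,1289493121758");
 *   Map<ServerName, Boolean> previous = admin.compactionSwitch(false, servers);
 *   // ... later, restore:
 *   admin.compactionSwitch(true, servers);
 */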
655 * Compact all regions on the region server. Asynchronous operation in that this method requests
656 * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it
657 * can take a while).
658 * @param serverName the region server name
659 * @throws IOException if a remote or network exception occurs
661 void compactRegionServer(ServerName serverName) throws IOException;
664 * Major compact all regions on the region server. Asynchronous operation in that this method
665 * requests that a Compaction run and then it returns. It does not wait on the completion of
666 * Compaction (it can take a while).
667 * @param serverName the region server name
668 * @throws IOException if a remote or network exception occurs
670 void majorCompactRegionServer(ServerName serverName) throws IOException;
673 * Move the region <code>r</code> to <code>dest</code>.
675 * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
676 * suffix: e.g. if regionname is
677 * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
678 * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
679 * @param destServerName The servername of the destination regionserver. If passed the empty byte
680 * array we'll assign to a random server. A server name is made of host, port and startcode.
681 * Here is an example: <code> host187.example.com,60020,1289493121758</code>
682 * @throws IOException if we can't find a region named
683 * <code>encodedRegionName</code>
685 void move(byte[] encodedRegionName, byte[] destServerName) throws IOException;
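/*
 * Sketch (illustrative; names hypothetical): moving the first region of a table to a chosen
 * region server, using the encoded region name as described above.
 *
 *   RegionInfo region = admin.getRegions(TableName.valueOf("example_table")).get(0);
 *   ServerName dest = ServerName.valueOf("host187.example.com,60020,1289493121758");
 *   admin.move(Bytes.toBytes(region.getEncodedName()), Bytes.toBytes(dest.getServerName()));
 */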
688 * Assign a Region.
689 * @param regionName Region name to assign.
691 void assign(byte[] regionName) throws IOException;
694 * Unassign a region from current hosting regionserver. Region will then be assigned to a
695 * regionserver chosen at random. Region could be reassigned back to the same server. Use {@link
696 * #move(byte[], byte[])} if you want to control the region movement.
698 * @param regionName Region to unassign. Will clear any existing RegionPlan if one found.
699 * @param force If <code>true</code>, force unassign (will remove the region from regions-in-transition
700 * too, if present; if this results in double assignment, use hbck -fix to resolve. To be used by experts).
702 void unassign(byte[] regionName, boolean force)
703 throws IOException;
706 * Offline the specified region from the master's in-memory state. It will not attempt to reassign
707 * the region as in unassign. This API can be used when a region is not served by any region server
708 * but is still online as per the Master's in-memory state. If this API is incorrectly used on an
709 * active region then the master will lose track of that region. This is a special method that should
710 * be used by experts or hbck.
712 * @param regionName Region to offline.
713 * @throws IOException
715 void offline(byte[] regionName) throws IOException;
718 * Turn the load balancer on or off.
719 * @param onOrOff Set to <code>true</code> to enable, <code>false</code> to disable.
720 * @param synchronous If <code>true</code>, it waits until current balance() call, if outstanding,
721 * to return.
722 * @return Previous balancer value
724 boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException;
728 * Invoke the balancer. Will run the balancer and, if there are regions to move, it will go ahead
729 * and do the reassignments. It may NOT run for various reasons; check the logs.
731 * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
733 boolean balance() throws IOException;
736 * Invoke the balancer. Will run the balancer and, if there are regions to move, it will
737 * go ahead and do the reassignments. If there are regions in transition, a force parameter of true
738 * will still run the balancer. It may not run for other reasons; check
739 * the logs.
740 * @param force whether we should force balance even if there is region in transition
741 * @return <code>true</code> if balancer ran, <code>false</code> otherwise.
743 boolean balance(boolean force) throws IOException;
746 * Query the current state of the balancer.
748 * @return <code>true</code> if the balancer is enabled, <code>false</code> otherwise.
750 boolean isBalancerEnabled() throws IOException;
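/*
 * Sketch (illustrative): making sure the balancer is on and triggering a run.
 *
 *   if (!admin.isBalancerEnabled()) {
 *     admin.balancerSwitch(true, true); // wait for any outstanding balance() call to return
 *   }
 *   boolean ran = admin.balance();      // false if the balancer could not run; check master logs
 */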
753 * Clear all the blocks corresponding to this table from BlockCache. For expert-admins.
754 * Calling this API will drop all the cached blocks specific to a table from BlockCache.
755 * This can significantly impact the query performance as the subsequent queries will
756 * have to retrieve the blocks from the underlying filesystem.
758 * @param tableName table to clear block cache
759 * @return CacheEvictionStats related to the eviction
760 * @throws IOException if a remote or network exception occurs
762 CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException;
765 * Invoke the region normalizer. It may NOT run for various reasons; check the logs.
767 * @return <code>true</code> if region normalizer ran, <code>false</code> otherwise.
769 boolean normalize() throws IOException;
772 * Query the current state of the region normalizer.
774 * @return <code>true</code> if region normalizer is enabled, <code>false</code> otherwise.
776 boolean isNormalizerEnabled() throws IOException;
779 * Turn region normalizer on or off.
781 * @return Previous normalizer value
783 boolean normalizerSwitch(boolean on) throws IOException;
786 * Enable/Disable the catalog janitor.
788 * @param onOrOff if <code>true</code> enables the catalog janitor
789 * @return the previous state
791 boolean catalogJanitorSwitch(boolean onOrOff) throws IOException;
794 * Ask for a scan of the catalog table.
796 * @return the number of entries cleaned
798 int runCatalogJanitor() throws IOException;
801 * Query the current state of the catalog janitor (enabled/disabled).
804 boolean isCatalogJanitorEnabled() throws IOException;
807 * Enable/Disable the cleaner chore.
809 * @param onOrOff if <code>true</code> enables the cleaner chore
810 * @return the previous state
811 * @throws IOException
813 boolean cleanerChoreSwitch(boolean onOrOff) throws IOException;
816 * Ask for cleaner chore to run.
818 * @return <code>true</code> if cleaner chore ran, <code>false</code> otherwise
819 * @throws IOException
821 boolean runCleanerChore() throws IOException;
824 * Query the current state of the cleaner chore (enabled/disabled).
826 * @throws IOException
828 boolean isCleanerChoreEnabled() throws IOException;
832 * Merge two regions. Asynchronous operation.
834 * @param nameOfRegionA encoded or full name of region a
835 * @param nameOfRegionB encoded or full name of region b
836 * @param forcible <code>true</code> to do a compulsory merge; otherwise we will only merge
837 * two adjacent regions
838 * @throws IOException
840 Future<Void> mergeRegionsAsync(
841 byte[] nameOfRegionA,
842 byte[] nameOfRegionB,
843 boolean forcible) throws IOException;
846 * Merge regions. Asynchronous operation.
848 * @param nameofRegionsToMerge encoded or full name of daughter regions
849 * @param forcible <code>true</code> to do a compulsory merge; otherwise we will only merge
850 * adjacent regions
851 * @throws IOException
853 Future<Void> mergeRegionsAsync(
854 byte[][] nameofRegionsToMerge,
855 boolean forcible) throws IOException;
858 * Split a table. The method will execute a split action for each region in the table.
859 * Asynchronous operation.
860 * @param tableName table to split
861 * @throws IOException if a remote or network exception occurs
863 void split(TableName tableName) throws IOException;
866 * Split a table. Asynchronous operation.
868 * @param tableName table to split
869 * @param splitPoint the explicit position to split on
870 * @throws IOException if a remote or network exception occurs
872 void split(TableName tableName, byte[] splitPoint) throws IOException;
875 * Split an individual region. Asynchronous operation.
876 * @param regionName region to split
877 * @param splitPoint the explicit position to split on
878 * @throws IOException if a remote or network exception occurs
880 Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException;
883 * Modify an existing table, more IRB friendly version.
884 * @param td modified description of the table
885 * @throws IOException if a remote or network exception occurs
887 default void modifyTable(TableDescriptor td) throws IOException {
888 get(modifyTableAsync(td), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
892 * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means
893 * that it may be a while before your schema change is propagated to all regions of the table. You can
894 * use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
895 * ExecutionException if there was an error while executing the operation or TimeoutException in
896 * case the wait timeout was not long enough to allow the operation to complete.
897 * @param td description of the table
898 * @throws IOException if a remote or network exception occurs
899 * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the
900 * operation to complete
902 Future<Void> modifyTableAsync(TableDescriptor td) throws IOException;
905 * Shuts down the HBase cluster.
906 * <p/>
907 * Notice that a successful shutdown call may end with an error, since the remote server has already
908 * been shut down.
909 * @throws IOException if a remote or network exception occurs
911 void shutdown() throws IOException;
914 * Shuts down the current HBase master only. Does not shut down the cluster.
915 * <p/>
916 * Notice that a successful stopMaster call may end with an error, since the remote server has
917 * already been shut down.
918 * @throws IOException if a remote or network exception occurs
919 * @see #shutdown()
921 void stopMaster() throws IOException;
924 * Check whether Master is in maintenance mode.
926 * @throws IOException if a remote or network exception occurs
928 boolean isMasterInMaintenanceMode() throws IOException;
931 * Stop the designated regionserver.
933 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
934 * <code>example.org:1234</code>
935 * @throws IOException if a remote or network exception occurs
937 void stopRegionServer(String hostnamePort) throws IOException;
940 * Get whole cluster metrics, containing status about:
941 * <pre>
942 * hbase version
943 * cluster id
944 * primary/backup master(s)
945 * master's coprocessors
946 * live/dead regionservers
947 * balancer
948 * regions in transition
949 * </pre>
950 * @return cluster metrics
951 * @throws IOException if a remote or network exception occurs
953 default ClusterMetrics getClusterMetrics() throws IOException {
954 return getClusterMetrics(EnumSet.allOf(ClusterMetrics.Option.class));
958 * Get cluster status with a set of {@link Option} to get desired status.
959 * @return cluster status
960 * @throws IOException if a remote or network exception occurs
962 ClusterMetrics getClusterMetrics(EnumSet<Option> options) throws IOException;
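/*
 * Sketch (illustrative): fetching only the metrics you need rather than the whole cluster status.
 *
 *   ClusterMetrics metrics =
 *       admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
 *   ServerName master = metrics.getMasterName();
 *   int liveServers = metrics.getLiveServerMetrics().size();
 */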
965 * @return current master server name
966 * @throws IOException if a remote or network exception occurs
968 default ServerName getMaster() throws IOException {
969 return getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
973 * @return current backup master list
974 * @throws IOException if a remote or network exception occurs
976 default Collection<ServerName> getBackupMasters() throws IOException {
977 return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)).getBackupMasterNames();
981 * @return current live region servers list
982 * @throws IOException if a remote or network exception occurs
984 default Collection<ServerName> getRegionServers() throws IOException {
985 return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
989 * Get {@link RegionMetrics} of all regions hosted on a regionserver.
991 * @param serverName region server from which {@link RegionMetrics} is required.
992 * @return a {@link RegionMetrics} list of all regions hosted on a region server
993 * @throws IOException if a remote or network exception occurs
995 default List<RegionMetrics> getRegionMetrics(ServerName serverName) throws IOException {
996 return getRegionMetrics(serverName, null);
1000 * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table.
1002 * @param serverName region server from which {@link RegionMetrics} is required.
1003 * @param tableName get {@link RegionMetrics} of regions belonging to the table
1004 * @return region metrics map of all regions of a table hosted on a region server
1005 * @throws IOException if a remote or network exception occurs
1007 List<RegionMetrics> getRegionMetrics(ServerName serverName,
1008 TableName tableName) throws IOException;
1011 * @return Configuration used by the instance.
1013 Configuration getConfiguration();
1016 * Create a new namespace. Blocks until namespace has been successfully created or an exception is
1017 * thrown.
1018 * @param descriptor descriptor which describes the new namespace.
1020 default void createNamespace(NamespaceDescriptor descriptor) throws IOException {
1021 get(createNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1025 * Create a new namespace.
1026 * @param descriptor descriptor which describes the new namespace
1027 * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit) to
1028 * wait on the operation to complete.
1030 Future<Void> createNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
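/*
 * Sketch (illustrative; namespace, table, and family names hypothetical): creating a namespace
 * and then a table inside it.
 *
 *   admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
 *   TableDescriptor desc = TableDescriptorBuilder
 *       .newBuilder(TableName.valueOf("example_ns", "example_table"))
 *       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
 *       .build();
 *   admin.createTable(desc);
 */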
1033 * Modify an existing namespace. Blocks until namespace has been successfully modified or an
1034 * exception is thrown.
1035 * @param descriptor descriptor which describes the new namespace
1037 default void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
1038 get(modifyNamespaceAsync(descriptor), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1042 * Modify an existing namespace.
1043 * @param descriptor descriptor which describes the new namespace
1044 * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit) to
1045 * wait on the operation to complete.
1047 Future<Void> modifyNamespaceAsync(NamespaceDescriptor descriptor) throws IOException;
1050 * Delete an existing namespace. Only empty namespaces (no tables) can be removed. Blocks until
1051 * namespace has been successfully deleted or an exception is thrown.
1052 * @param name namespace name
1054 default void deleteNamespace(String name) throws IOException {
1055 get(deleteNamespaceAsync(name), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1059 * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
1060 * @param name namespace name
1061 * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit) to
1062 * wait on the operation to complete.
1064 Future<Void> deleteNamespaceAsync(String name) throws IOException;
1067 * Get a namespace descriptor by name.
1068 * @param name name of namespace descriptor
1069 * @return A descriptor
1070 * @throws org.apache.hadoop.hbase.NamespaceNotFoundException
1071 * @throws IOException if a remote or network exception occurs
1073 NamespaceDescriptor getNamespaceDescriptor(String name)
1074 throws NamespaceNotFoundException, IOException;
1077 * List available namespace descriptors.
1078 * @return List of descriptors
1080 NamespaceDescriptor[] listNamespaceDescriptors() throws IOException;
1083 * Get list of table descriptors by namespace.
1084 * @param name namespace name
1085 * @return returns a list of TableDescriptors
1087 List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException;
1090 * Get list of table names by namespace.
1091 * @param name namespace name
1092 * @return The list of table names in the namespace
1094 TableName[] listTableNamesByNamespace(String name) throws IOException;
1097 * Get the regions of a given table.
1099 * @param tableName the name of the table
1100 * @return List of {@link RegionInfo}.
1102 List<RegionInfo> getRegions(TableName tableName) throws IOException;
1104 @Override
1105 void close();
1108 * Get tableDescriptors.
1110 * @param tableNames List of table names
1111 * @return returns a list of TableDescriptors
1112 * @throws IOException if a remote or network exception occurs
1114 List<TableDescriptor> listTableDescriptors(List<TableName> tableNames)
1115 throws IOException;
1118 * Abort a procedure.
1119 * <p/>
1120 * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
1121 * @param procId ID of the procedure to abort
1122 * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
1123 * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does
1124 * not exist
1125 * @throws IOException
1126 * @deprecated Since 2.1.1 -- to be removed.
1128 @Deprecated
1129 default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throws IOException {
1130 return get(abortProcedureAsync(procId, mayInterruptIfRunning), getSyncWaitTimeout(),
1131 TimeUnit.MILLISECONDS);
1135 * Abort a procedure but does not block and wait for completion.
1136 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1137 * It may throw ExecutionException if there was an error while executing the operation
1138 * or TimeoutException in case the wait timeout was not long enough to allow the
1139 * operation to complete.
1140 * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
1142 * @param procId ID of the procedure to abort
1143 * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
1144 * @return <code>true</code> if aborted, <code>false</code> if procedure already completed or does not exist
1145 * @throws IOException
1146 * @deprecated Since 2.1.1 -- to be removed.
1148 @Deprecated
1149 Future<Boolean> abortProcedureAsync(long procId, boolean mayInterruptIfRunning)
1150 throws IOException;
1153 * Get procedures.
1154 * @return procedure list in JSON
1155 * @throws IOException
1157 String getProcedures() throws IOException;
1160 * Get locks.
1161 * @return lock list in JSON
1162 * @throws IOException if a remote or network exception occurs
1164 String getLocks() throws IOException;
1167 * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
1169 * Note that the actual rolling of the log writer is asynchronous and may not be complete when
1170 * this method returns. As a side effect of this call, the named region server may schedule
1171 * store flushes at the request of the wal.
1173 * @param serverName The servername of the regionserver.
1174 * @throws IOException if a remote or network exception occurs
1175 * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
1177 void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;
1180 * Helper that delegates to getClusterMetrics().getMasterCoprocessorNames().
1181 * @return an array of master coprocessors
1182 * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames()
1184 default List<String> getMasterCoprocessorNames() throws IOException {
1185 return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS))
1186 .getMasterCoprocessorNames();
1190 * Get the current compaction state of a table. It could be in a major compaction, a minor
1191 * compaction, both, or none.
1193 * @param tableName table to examine
1194 * @return the current compaction state
1195 * @throws IOException if a remote or network exception occurs
1197 CompactionState getCompactionState(TableName tableName) throws IOException;
1200 * Get the current compaction state of a table. It could be in a compaction, or none.
1202 * @param tableName table to examine
1203 * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
1204 * @return the current compaction state
1205 * @throws IOException if a remote or network exception occurs
1207 CompactionState getCompactionState(TableName tableName,
1208 CompactType compactType) throws IOException;
1211 * Get the current compaction state of region. It could be in a major compaction, a minor
1212 * compaction, both, or none.
1214 * @param regionName region to examine
1215 * @return the current compaction state
1216 * @throws IOException if a remote or network exception occurs
1218 CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException;
1221 * Get the timestamp of the last major compaction for the passed table.
1223 * The timestamp of the oldest HFile resulting from a major compaction of that table,
1224 * or 0 if no such HFile could be found.
1226 * @param tableName table to examine
1227 * @return the last major compaction timestamp or 0
1228 * @throws IOException if a remote or network exception occurs
1230 long getLastMajorCompactionTimestamp(TableName tableName) throws IOException;
1233 * Get the timestamp of the last major compaction for the passed region.
1235 * The timestamp of the oldest HFile resulting from a major compaction of that region,
1236 * or 0 if no such HFile could be found.
1238 * @param regionName region to examine
1239 * @return the last major compaction timestamp or 0
1240 * @throws IOException if a remote or network exception occurs
1242 long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
1245 * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
1246 * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
1247 * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
1248 * a different type or with different parameters) will fail with a
1249 * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
1250 * naming. Snapshot names follow the same naming constraints as tables in HBase. See
1251 * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
1252 * @param snapshotName name of the snapshot to be created
1253 * @param tableName name of the table for which snapshot is created
1254 * @throws IOException if a remote or network exception occurs
1255 * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
1256 * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
1258 default void snapshot(String snapshotName, TableName tableName)
1259 throws IOException, SnapshotCreationException, IllegalArgumentException {
1260 snapshot(snapshotName, tableName, SnapshotType.FLUSH);
1264 * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
1265 * snapshot</b>. Attempts to take a snapshot with the same name (even a different type or with
1266 * different parameters) will fail with a {@link SnapshotCreationException} indicating the
1267 * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
1268 * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
1269 * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
1270 * snapshots stored on the cluster
1271 * @param tableName name of the table to snapshot
1272 * @param type type of snapshot to take
1273 * @throws IOException if we fail to reach the master
1274 * @throws SnapshotCreationException if snapshot creation failed
1275 * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
1277 default void snapshot(String snapshotName, TableName tableName, SnapshotType type)
1278 throws IOException, SnapshotCreationException, IllegalArgumentException {
1279 snapshot(new SnapshotDescription(snapshotName, tableName, type));
1283 * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single
1284 * snapshot should be taken at a time for an instance of HBase, or results may be undefined (you
1285 * can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a
1286 * single cluster). Snapshots are considered unique based on <b>the name of the snapshot</b>.
1287 * Attempts to take a snapshot with the same name (even a different type or with different
1288 * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
1289 * Snapshot names follow the same naming constraints as tables in HBase. See
1290 * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should
1291 * probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you are sure
1292 * about the type of snapshot that you want to take.
1293 * @param snapshot snapshot to take
1294 * @throws IOException if we lose contact with the master.
1295 * @throws SnapshotCreationException if snapshot failed to be taken
1296 * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
1298 void snapshot(SnapshotDescription snapshot)
1299 throws IOException, SnapshotCreationException, IllegalArgumentException;
1302 * Take a snapshot without waiting for the server to complete that snapshot (asynchronous). Only a
1303 * single snapshot should be taken at a time, or results may be undefined.
1305 * @param snapshot snapshot to take
1306 * @throws IOException if the snapshot did not succeed or we lose contact with the master.
1307 * @throws SnapshotCreationException if snapshot creation failed
1308 * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
1310 Future<Void> snapshotAsync(SnapshotDescription snapshot)
1311 throws IOException, SnapshotCreationException;
1314 * Check the current state of the passed snapshot. There are three possible states: <ol>
1315 * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
1316 * <li>finished with error - throws the exception that caused the snapshot to fail</li> </ol> The
1317 * cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
1318 * run/started since the snapshot you are checking, you will receive an {@link
1319 * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
1321 * @param snapshot description of the snapshot to check
1322 * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
1323 * running
1324 * @throws IOException if we have a network issue
1325 * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed
1326 * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
1327 * unknown
1329 boolean isSnapshotFinished(SnapshotDescription snapshot)
1330 throws IOException, HBaseSnapshotException, UnknownSnapshotException;
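/*
 * Sketch (illustrative; snapshot and table names hypothetical): taking a snapshot asynchronously
 * and polling for completion as described above.
 *
 *   SnapshotDescription snap = new SnapshotDescription("example_snap",
 *       TableName.valueOf("example_table"), SnapshotType.FLUSH);
 *   admin.snapshotAsync(snap);
 *   while (!admin.isSnapshotFinished(snap)) {
 *     Thread.sleep(500); // isSnapshotFinished throws if the snapshot failed
 *   }
 */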
1333 * Restore the specified snapshot on the original table. (The table must be disabled) If the
1334 * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to
1335 * <code>true</code>, a snapshot of the current table is taken before executing the restore
1336 * operation. In case of restore failure, the failsafe snapshot will be restored. If the restore
1337 * completes without problem the failsafe snapshot is deleted.
1338 * @param snapshotName name of the snapshot to restore
1339 * @throws IOException if a remote or network exception occurs
1340 * @throws RestoreSnapshotException if snapshot failed to be restored
1341 * @throws IllegalArgumentException if the restore request is formatted incorrectly
1343 void restoreSnapshot(String snapshotName) throws IOException, RestoreSnapshotException;
1346 * Restore the specified snapshot on the original table. (The table must be disabled) If
1347 * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
1348 * before executing the restore operation. In case of restore failure, the failsafe snapshot will
1349 * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
1350 * failsafe snapshot name is configurable by using the property
1351 * "hbase.snapshot.restore.failsafe.name".
1352 * @param snapshotName name of the snapshot to restore
1353 * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
1354 * @throws IOException if a remote or network exception occurs
1355 * @throws RestoreSnapshotException if snapshot failed to be restored
1356 * @throws IllegalArgumentException if the restore request is formatted incorrectly
1358 default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
1359 throws IOException, RestoreSnapshotException {
1360 restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
1364 * Restore the specified snapshot on the original table. (The table must be disabled) If
1365 * 'takeFailSafeSnapshot' is set to <code>true</code>, a snapshot of the current table is taken
1366 * before executing the restore operation. In case of restore failure, the failsafe snapshot will
1367 * be restored. If the restore completes without problem the failsafe snapshot is deleted. The
1368 * failsafe snapshot name is configurable by using the property
1369 * "hbase.snapshot.restore.failsafe.name".
1370 * @param snapshotName name of the snapshot to restore
1371 * @param takeFailSafeSnapshot <code>true</code> if the failsafe snapshot should be taken
1372 * @param restoreAcl <code>true</code> to restore acl of snapshot
1373 * @throws IOException if a remote or network exception occurs
1374 * @throws RestoreSnapshotException if snapshot failed to be restored
1375 * @throws IllegalArgumentException if the restore request is formatted incorrectly
1377 void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
1378 throws IOException, RestoreSnapshotException;
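  /*
   * A sketch of a failsafe restore, assuming an Admin named "admin"; the snapshot and table
   * names are illustrative. The table must be disabled before the restore and can be re-enabled
   * afterwards:
   *
   *   TableName table = TableName.valueOf("t1");
   *   admin.disableTable(table);
   *   admin.restoreSnapshot("snap-t1", true);   // take a failsafe snapshot before restoring
   *   admin.enableTable(table);
   */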
1381 * Create a new table by cloning the snapshot content.
1382 * @param snapshotName name of the snapshot to be cloned
1383 * @param tableName name of the table where the snapshot will be restored
1384 * @throws IOException if a remote or network exception occurs
1385 * @throws TableExistsException if table to be created already exists
1386 * @throws RestoreSnapshotException if snapshot failed to be cloned
1387 * @throws IllegalArgumentException if the specified table does not have a valid name
1389 default void cloneSnapshot(String snapshotName, TableName tableName)
1390 throws IOException, TableExistsException, RestoreSnapshotException {
1391 cloneSnapshot(snapshotName, tableName, false);
1395 * Create a new table by cloning the snapshot content.
1396 * @param snapshotName name of the snapshot to be cloned
1397 * @param tableName name of the table where the snapshot will be restored
1398 * @param restoreAcl <code>true</code> to clone acl into newly created table
1399 * @throws IOException if a remote or network exception occurs
1400 * @throws TableExistsException if table to be created already exists
1401 * @throws RestoreSnapshotException if snapshot failed to be cloned
1402 * @throws IllegalArgumentException if the specified table does not have a valid name
1404 default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
1405 throws IOException, TableExistsException, RestoreSnapshotException {
1406 get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl), getSyncWaitTimeout(),
1407 TimeUnit.MILLISECONDS);
1411 * Create a new table by cloning the snapshot content, without blocking and waiting for the clone
1412 * to complete. You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1413 * It may throw ExecutionException if there was an error while executing the operation or
1414 * TimeoutException in case the wait timeout was not long enough to allow the operation to
1415 * complete.
1416 * @param snapshotName name of the snapshot to be cloned
1417 * @param tableName name of the table where the snapshot will be restored
1418 * @throws IOException if a remote or network exception occurs
1419 * @throws TableExistsException if the table to be created already exists
1420 * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit) to wait
1421 * on the operation to complete.
1423 default Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName)
1424 throws IOException, TableExistsException {
1425 return cloneSnapshotAsync(snapshotName, tableName, false);
1429 * Create a new table by cloning the snapshot content, without blocking and waiting for it to complete.
1430 * @param snapshotName name of the snapshot to be cloned
1431 * @param tableName name of the table where the snapshot will be restored
1432 * @param restoreAcl <code>true</code> to clone acl into newly created table
1433 * @throws IOException if a remote or network exception occurs
1434 * @throws TableExistsException if table to be created already exists
1435 * @throws RestoreSnapshotException if snapshot failed to be cloned
1436 * @throws IllegalArgumentException if the specified table does not have a valid name
1438 Future<Void> cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl)
1439 throws IOException, TableExistsException, RestoreSnapshotException;
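  /*
   * A sketch of the asynchronous clone described above, assuming an Admin named "admin"; the
   * names and timeout are illustrative:
   *
   *   Future<Void> clone =
   *       admin.cloneSnapshotAsync("snap-t1", TableName.valueOf("t1_clone"), false);
   *   clone.get(5, TimeUnit.MINUTES);   // may throw ExecutionException or TimeoutException
   */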
1442 * Execute a distributed procedure on a cluster.
1444 * @param signature A distributed procedure is uniquely identified by its signature (default the
1445 * root ZK node name of the procedure).
1446 * @param instance The instance name of the procedure. For some procedures, this parameter is
1447 * optional.
1448 * @param props Property/Value pairs of properties passed to the procedure
1449 * @throws IOException if a remote or network exception occurs
1451 void execProcedure(String signature, String instance, Map<String, String> props)
1452 throws IOException;
1455 * Execute a distributed procedure on a cluster.
1457 * @param signature A distributed procedure is uniquely identified by its signature (default the
1458 * root ZK node name of the procedure).
1459 * @param instance The instance name of the procedure. For some procedures, this parameter is
1460 * optional.
1461 * @param props Property/Value pairs of properties passed to the procedure
1462 * @return data returned after procedure execution. null if no return data.
1463 * @throws IOException if a remote or network exception occurs
1465 byte[] execProcedureWithReturn(String signature, String instance, Map<String, String> props)
1466 throws IOException;
1469 * Check the current state of the specified procedure. There are three possible states: <ol>
1470 * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
1471 * <li>finished with error - throws the exception that caused the procedure to fail</li> </ol>
1473 * @param signature The signature that uniquely identifies a procedure
1474 * @param instance The instance name of the procedure
1475 * @param props Property/Value pairs of properties passed to the procedure
1476 * @return <code>true</code> if the specified procedure is finished successfully, <code>false</code> if it is still running
1477 * @throws IOException if the specified procedure finished with error
1479 boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
1480 throws IOException;
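  /*
   * A sketch of running a distributed procedure and polling for completion, assuming an Admin
   * named "admin". The signature and instance values here are illustrative; the actual values
   * depend on the procedure managers installed on the cluster:
   *
   *   Map<String, String> props = Collections.emptyMap();
   *   admin.execProcedure("flush-table-proc", "t1", props);
   *   while (!admin.isProcedureFinished("flush-table-proc", "t1", props)) {
   *     Thread.sleep(1000);   // isProcedureFinished rethrows the cause if the procedure failed
   *   }
   */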
1483 * List completed snapshots.
1485 * @return a list of snapshot descriptors for completed snapshots
1486 * @throws IOException if a network error occurs
1488 List<SnapshotDescription> listSnapshots() throws IOException;
1491 * List all the completed snapshots matching the given pattern.
1493 * @param pattern The compiled regular expression to match against
1494 * @return list of SnapshotDescription
1495 * @throws IOException if a remote or network exception occurs
1497 List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
1500 * List all the completed snapshots matching the given table name regular expression and snapshot
1501 * name regular expression.
1502 * @param tableNamePattern The compiled table name regular expression to match against
1503 * @param snapshotNamePattern The compiled snapshot name regular expression to match against
1504 * @return list of completed SnapshotDescription
1505 * @throws IOException if a remote or network exception occurs
1507 List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
1508 Pattern snapshotNamePattern) throws IOException;
1511 * Delete an existing snapshot.
1513 * @param snapshotName name of the snapshot
1514 * @throws IOException if a remote or network exception occurs
1516 void deleteSnapshot(byte[] snapshotName) throws IOException;
1519 * Delete an existing snapshot.
1521 * @param snapshotName name of the snapshot
1522 * @throws IOException if a remote or network exception occurs
1524 void deleteSnapshot(String snapshotName) throws IOException;
1527 * Delete existing snapshots whose names match the pattern passed.
1529 * @param pattern pattern for names of the snapshot to match
1530 * @throws IOException if a remote or network exception occurs
1532 void deleteSnapshots(Pattern pattern) throws IOException;
1535 * Delete all existing snapshots matching the given table name regular expression and snapshot
1536 * name regular expression.
1537 * @param tableNamePattern The compiled table name regular expression to match against
1538 * @param snapshotNamePattern The compiled snapshot name regular expression to match against
1539 * @throws IOException if a remote or network exception occurs
1541 void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
1542 throws IOException;
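  /*
   * A sketch of listing and then deleting snapshots by pattern, assuming an Admin named "admin";
   * the patterns are illustrative:
   *
   *   Pattern tables = Pattern.compile("web-.*");
   *   Pattern snaps = Pattern.compile("nightly-.*");
   *   for (SnapshotDescription sd : admin.listTableSnapshots(tables, snaps)) {
   *     System.out.println(sd.getName());
   *   }
   *   admin.deleteTableSnapshots(tables, snaps);
   */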
1545 * Apply the new quota settings.
1547 * @param quota the quota settings
1548 * @throws IOException if a remote or network exception occurs
1550 void setQuota(QuotaSettings quota) throws IOException;
1553 * List the quotas based on the filter.
1554 * @param filter the quota settings filter
1555 * @return the QuotaSettings list
1556 * @throws IOException if a remote or network exception occurs
1558 List<QuotaSettings> getQuota(QuotaFilter filter) throws IOException;
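  /*
   * A sketch of setting and listing a throttle quota, assuming an Admin named "admin" and the
   * QuotaSettingsFactory.throttleTable(...) and QuotaFilter.setTableFilter(...) helpers; the
   * table name and limit are illustrative:
   *
   *   admin.setQuota(QuotaSettingsFactory.throttleTable(
   *       TableName.valueOf("t1"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
   *   for (QuotaSettings settings : admin.getQuota(new QuotaFilter().setTableFilter("t1"))) {
   *     System.out.println(settings);
   *   }
   */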
1561 * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active
1562 * master. <p> The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access
1563 * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service
1564 * invocations: </p> <div style="background-color: #cccccc; padding: 2px">
1565 * <blockquote><pre>
1566 * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
1567 * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
1568 * MyCallRequest request = MyCallRequest.newBuilder()
1569 * ...
1570 * .build();
1571 * MyCallResponse response = service.myCall(null, request);
1572 * </pre></blockquote></div>
1574 * @return A MasterCoprocessorRpcChannel instance
1576 CoprocessorRpcChannel coprocessorService();
1580 * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
1581 * connected to the passed region server.
1583 * <p>
1584 * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
1585 * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
1586 * </p>
1588 * <div style="background-color: #cccccc; padding: 2px">
1589 * <blockquote><pre>
1590 * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
1591 * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
1592 * MyCallRequest request = MyCallRequest.newBuilder()
1593 * ...
1594 * .build();
1595 * MyCallResponse response = service.myCall(null, request);
1596 * </pre></blockquote></div>
1598 * @param serverName the server name to which the endpoint call is made
1599 * @return A RegionServerCoprocessorRpcChannel instance
1601 CoprocessorRpcChannel coprocessorService(ServerName serverName);
1605 * Update the configuration and trigger an online config change
1606 * on the regionserver.
1607 * @param server The server whose config needs to be updated.
1608 * @throws IOException if a remote or network exception occurs
1610 void updateConfiguration(ServerName server) throws IOException;
1614 * Update the configuration and trigger an online config change
1615 * on all the regionservers.
1616 * @throws IOException if a remote or network exception occurs
1618 void updateConfiguration() throws IOException;
1621 * Get the info port of the current master if one is available.
1622 * @return master info port
1623 * @throws IOException if a remote or network exception occurs
1625 default int getMasterInfoPort() throws IOException {
1626 return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).getMasterInfoPort();
1630 * Return the set of supported security capabilities.
1631 * @throws IOException if a remote or network exception occurs
1632 * @throws UnsupportedOperationException
1634 List<SecurityCapability> getSecurityCapabilities() throws IOException;
1637 * Turn the split switch on or off.
1638 * @param enabled enabled or not
1639 * @param synchronous If <code>true</code>, waits until any outstanding split() call has
1640 * returned.
1641 * @return Previous switch value
1643 boolean splitSwitch(boolean enabled, boolean synchronous) throws IOException;
1646 * Turn the merge switch on or off.
1647 * @param enabled enabled or not
1648 * @param synchronous If <code>true</code>, waits until any outstanding merge() call has
1649 * returned.
1650 * @return Previous switch value
1652 boolean mergeSwitch(boolean enabled, boolean synchronous) throws IOException;
1655 * Query the current state of the split switch.
1656 * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
1658 boolean isSplitEnabled() throws IOException;
1661 * Query the current state of the merge switch.
1662 * @return <code>true</code> if the switch is enabled, <code>false</code> otherwise.
1664 boolean isMergeEnabled() throws IOException;
1667 * Add a new replication peer for replicating data to a slave cluster.
1668 * @param peerId a short name that identifies the peer
1669 * @param peerConfig configuration for the replication peer
1670 * @throws IOException if a remote or network exception occurs
1672 default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
1673 throws IOException {
1674 addReplicationPeer(peerId, peerConfig, true);
1678 * Add a new replication peer for replicating data to a slave cluster.
1679 * @param peerId a short name that identifies the peer
1680 * @param peerConfig configuration for the replication peer
1681 * @param enabled peer state, true if ENABLED and false if DISABLED
1682 * @throws IOException if a remote or network exception occurs
1684 default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
1685 throws IOException {
1686 get(addReplicationPeerAsync(peerId, peerConfig, enabled), getSyncWaitTimeout(),
1687 TimeUnit.MILLISECONDS);
1691 * Add a new replication peer without blocking until the operation completes.
1692 * <p/>
1693 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1694 * ExecutionException if there was an error while executing the operation or TimeoutException in
1695 * case the wait timeout was not long enough to allow the operation to complete.
1696 * @param peerId a short name that identifies the peer
1697 * @param peerConfig configuration for the replication peer
1698 * @return the result of the async operation
1699 * @throws IOException if a remote or network exception occurs
1701 default Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig)
1702 throws IOException {
1703 return addReplicationPeerAsync(peerId, peerConfig, true);
1707 * Add a new replication peer without blocking until the operation completes.
1708 * <p>
1709 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1710 * ExecutionException if there was an error while executing the operation or TimeoutException in
1711 * case the wait timeout was not long enough to allow the operation to complete.
1712 * @param peerId a short name that identifies the peer
1713 * @param peerConfig configuration for the replication peer
1714 * @param enabled peer state, true if ENABLED and false if DISABLED
1715 * @return the result of the async operation
1716 * @throws IOException if a remote or network exception occurs
1718 Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
1719 boolean enabled) throws IOException;
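  /*
   * A sketch of adding a peer, assuming an Admin named "admin" and the
   * ReplicationPeerConfig.newBuilder() builder; the peer id, cluster key and timeout are
   * illustrative:
   *
   *   ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
   *       .setClusterKey("zk1,zk2,zk3:2181:/hbase")
   *       .build();
   *   admin.addReplicationPeerAsync("peer1", peerConfig, true).get(1, TimeUnit.MINUTES);
   */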
1722 * Remove a peer and stop the replication.
1723 * @param peerId a short name that identifies the peer
1724 * @throws IOException if a remote or network exception occurs
1726 default void removeReplicationPeer(String peerId) throws IOException {
1727 get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(),
1728 TimeUnit.MILLISECONDS);
1732 * Remove a replication peer without blocking until the operation completes.
1733 * <p>
1734 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1735 * ExecutionException if there was an error while executing the operation or TimeoutException in
1736 * case the wait timeout was not long enough to allow the operation to complete.
1737 * @param peerId a short name that identifies the peer
1738 * @return the result of the async operation
1739 * @throws IOException if a remote or network exception occurs
1741 Future<Void> removeReplicationPeerAsync(String peerId) throws IOException;
1744 * Restart the replication stream to the specified peer.
1745 * @param peerId a short name that identifies the peer
1746 * @throws IOException if a remote or network exception occurs
1748 default void enableReplicationPeer(String peerId) throws IOException {
1749 get(enableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1753 * Enable a replication peer without blocking until the operation completes.
1754 * <p>
1755 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1756 * ExecutionException if there was an error while executing the operation or TimeoutException in
1757 * case the wait timeout was not long enough to allow the operation to complete.
1758 * @param peerId a short name that identifies the peer
1759 * @return the result of the async operation
1760 * @throws IOException if a remote or network exception occurs
1762 Future<Void> enableReplicationPeerAsync(String peerId) throws IOException;
1765 * Stop the replication stream to the specified peer.
1766 * @param peerId a short name that identifies the peer
1767 * @throws IOException if a remote or network exception occurs
1769 default void disableReplicationPeer(String peerId) throws IOException {
1770 get(disableReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
1774 * Disable a replication peer without blocking until the operation completes.
1775 * <p/>
1776 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1777 * ExecutionException if there was an error while executing the operation or TimeoutException in
1778 * case the wait timeout was not long enough to allow the operation to complete.
1779 * @param peerId a short name that identifies the peer
1780 * @return the result of the async operation
1781 * @throws IOException if a remote or network exception occurs
1783 Future<Void> disableReplicationPeerAsync(String peerId) throws IOException;
1786 * Returns the configured ReplicationPeerConfig for the specified peer.
1787 * @param peerId a short name that identifies the peer
1788 * @return ReplicationPeerConfig for the peer
1789 * @throws IOException if a remote or network exception occurs
1791 ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws IOException;
1794 * Update the peerConfig for the specified peer.
1795 * @param peerId a short name that identifies the peer
1796 * @param peerConfig new config for the replication peer
1797 * @throws IOException if a remote or network exception occurs
1799 default void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
1800 throws IOException {
1801 get(updateReplicationPeerConfigAsync(peerId, peerConfig), getSyncWaitTimeout(),
1802 TimeUnit.MILLISECONDS);
1806 * Update the peerConfig for the specified peer without blocking until the operation completes.
1807 * <p/>
1808 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1809 * ExecutionException if there was an error while executing the operation or TimeoutException in
1810 * case the wait timeout was not long enough to allow the operation to complete.
1811 * @param peerId a short name that identifies the peer
1812 * @param peerConfig new config for the replication peer
1813 * @return the result of the async operation
1814 * @throws IOException if a remote or network exception occurs
1816 Future<Void> updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig)
1817 throws IOException;
1820 * Append the replicable table and column family config to the specified peer.
1821 * @param id a short name that identifies the cluster
1822 * @param tableCfs A map from tableName to column family names
1823 * @throws ReplicationException if tableCfs has conflict with existing config
1824 * @throws IOException if a remote or network exception occurs
1826 default void appendReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
1827 throws ReplicationException, IOException {
1828 if (tableCfs == null) {
1829 throw new ReplicationException("tableCfs is null");
1831 ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
1832 ReplicationPeerConfig newPeerConfig =
1833 ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
1834 updateReplicationPeerConfig(id, newPeerConfig);
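  /*
   * A sketch of appending table-cfs to a peer's config, assuming an Admin named "admin"; the
   * peer id, table and column family names are illustrative, and a null list is assumed here to
   * mean all column families of that table:
   *
   *   Map<TableName, List<String>> tableCfs = new HashMap<>();
   *   tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf1", "cf2"));
   *   tableCfs.put(TableName.valueOf("t2"), null);
   *   admin.appendReplicationPeerTableCFs("peer1", tableCfs);
   */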
1838 * Remove some table-cfs from config of the specified peer.
1839 * @param id a short name that identifies the cluster
1840 * @param tableCfs A map from tableName to column family names
1841 * @throws ReplicationException if tableCfs has conflict with existing config
1842 * @throws IOException if a remote or network exception occurs
1844 default void removeReplicationPeerTableCFs(String id, Map<TableName, List<String>> tableCfs)
1845 throws ReplicationException, IOException {
1846 if (tableCfs == null) {
1847 throw new ReplicationException("tableCfs is null");
1849 ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
1850 ReplicationPeerConfig newPeerConfig =
1851 ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
1852 updateReplicationPeerConfig(id, newPeerConfig);
1856 * Return a list of replication peers.
1857 * @return a list of replication peers description
1858 * @throws IOException if a remote or network exception occurs
1860 List<ReplicationPeerDescription> listReplicationPeers() throws IOException;
1863 * Return a list of replication peers.
1864 * @param pattern The compiled regular expression to match peer id
1865 * @return a list of replication peers description
1866 * @throws IOException if a remote or network exception occurs
1868 List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
1871 * Transition the current cluster to a new state in a synchronous replication peer.
1872 * @param peerId a short name that identifies the peer
1873 * @param state a new state of current cluster
1874 * @throws IOException if a remote or network exception occurs
1876 default void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
1877 throws IOException {
1878 get(transitReplicationPeerSyncReplicationStateAsync(peerId, state), getSyncWaitTimeout(),
1879 TimeUnit.MILLISECONDS);
1883 * Transition the current cluster to a new state in a synchronous replication peer, without
1884 * blocking until the operation completes.
1885 * <p>
1886 * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
1887 * ExecutionException if there was an error while executing the operation or TimeoutException in
1888 * case the wait timeout was not long enough to allow the operation to complete.
1889 * @param peerId a short name that identifies the peer
1890 * @param state a new state of current cluster
1891 * @throws IOException if a remote or network exception occurs
1893 Future<Void> transitReplicationPeerSyncReplicationStateAsync(String peerId,
1894 SyncReplicationState state) throws IOException;
1897 * Get the current cluster state in a synchronous replication peer.
1898 * @param peerId a short name that identifies the peer
1899 * @return the current cluster state
1900 * @throws IOException if a remote or network exception occurs
1902 default SyncReplicationState getReplicationPeerSyncReplicationState(String peerId)
1903 throws IOException {
1904 List<ReplicationPeerDescription> peers = listReplicationPeers(Pattern.compile(peerId));
1905 if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) {
1906 throw new IOException("Replication peer " + peerId + " does not exist");
1908 return peers.get(0).getSyncReplicationState();
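  /*
   * A sketch of transitioning and then reading back the sync replication state, assuming an
   * Admin named "admin"; the peer id and target state are illustrative:
   *
   *   admin.transitReplicationPeerSyncReplicationState("peer1", SyncReplicationState.STANDBY);
   *   SyncReplicationState state = admin.getReplicationPeerSyncReplicationState("peer1");
   */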
1912 * Mark region server(s) as decommissioned to prevent additional regions from getting
1913 * assigned to them. Optionally unload the regions on the servers. If there are multiple servers
1914 * to be decommissioned, decommissioning them at the same time can prevent wasteful region
1915 * movements. Region unloading is asynchronous.
1916 * @param servers The list of servers to decommission.
1917 * @param offload True to offload the regions from the decommissioned servers
1919 void decommissionRegionServers(List<ServerName> servers, boolean offload) throws IOException;
1922 * List region servers marked as decommissioned, which cannot be assigned regions.
1923 * @return List of decommissioned region servers.
1925 List<ServerName> listDecommissionedRegionServers() throws IOException;
1928 * Remove decommission marker from a region server to allow regions assignments.
1929 * Load regions onto the server if a list of regions is given. Region loading is
1930 * asynchronous.
1931 * @param server The server to recommission.
1932 * @param encodedRegionNames Regions to load onto the server.
1934 void recommissionRegionServer(ServerName server, List<byte[]> encodedRegionNames)
1935 throws IOException;
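  /*
   * A sketch of decommissioning and later recommissioning a region server, assuming an Admin
   * named "admin"; the host name, port and start code are illustrative:
   *
   *   ServerName server = ServerName.valueOf("rs1.example.com", 16020, 1578000000000L);
   *   admin.decommissionRegionServers(Collections.singletonList(server), true);   // offload too
   *   System.out.println(admin.listDecommissionedRegionServers());
   *   admin.recommissionRegionServer(server, Collections.emptyList());   // no regions preloaded
   */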
1938 * Find all tables and column families that are replicated from this cluster.
1939 * @return the replicated table-cfs list of this cluster.
1941 List<TableCFs> listReplicatedTableCFs() throws IOException;
1944 * Enable a table's replication switch.
1945 * @param tableName name of the table
1946 * @throws IOException if a remote or network exception occurs
1948 void enableTableReplication(TableName tableName) throws IOException;
1951 * Disable a table's replication switch.
1952 * @param tableName name of the table
1953 * @throws IOException if a remote or network exception occurs
1955 void disableTableReplication(TableName tableName) throws IOException;
1958 * Clear compacting queues on a regionserver.
1959 * @param serverName the region server name
1960 * @param queues the set of queue names
1961 * @throws IOException if a remote or network exception occurs
1962 * @throws InterruptedException if the call is interrupted while clearing the queues
1964 void clearCompactionQueues(ServerName serverName, Set<String> queues)
1965 throws IOException, InterruptedException;
1968 * List dead region servers.
1969 * @return List of dead region servers.
1971 default List<ServerName> listDeadServers() throws IOException {
1972 return getClusterMetrics(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
1976 * Clear dead region servers from master.
1977 * @param servers list of dead region servers.
1978 * @throws IOException if a remote or network exception occurs
1979 * @return List of servers that are not cleared
1981 List<ServerName> clearDeadServers(List<ServerName> servers) throws IOException;
1984 * Create a new table by cloning the existing table schema.
1985 * @param tableName name of the table to be cloned
1986 * @param newTableName name of the new table where the table will be created
1987 * @param preserveSplits True if the splits should be preserved
1988 * @throws IOException if a remote or network exception occurs
1990 void cloneTableSchema(TableName tableName, TableName newTableName, boolean preserveSplits)
1991 throws IOException;
1994 * Switch the rpc throttle enable state.
1995 * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
1996 * @return Previous rpc throttle enabled value
1998 boolean switchRpcThrottle(boolean enable) throws IOException;
2001 * Check whether the rpc throttle is enabled.
2002 * @return True if rpc throttle is enabled
2004 boolean isRpcThrottleEnabled() throws IOException;
2007 * Switch the exceed throttle quota. If enabled, the user/table/namespace throttle quota
2008 * can be exceeded if the region server has available quota.
2009 * @param enable Set to <code>true</code> to enable, <code>false</code> to disable.
2010 * @return Previous exceed throttle enabled value
2012 boolean exceedThrottleQuotaSwitch(final boolean enable) throws IOException;
2015 * Fetches the table sizes on the filesystem as tracked by the HBase Master.
2017 Map<TableName, Long> getSpaceQuotaTableSizes() throws IOException;
2020 * Fetches the {@link SpaceQuotaSnapshotView}s observed by a RegionServer.
2022 Map<TableName, ? extends SpaceQuotaSnapshotView> getRegionServerSpaceQuotaSnapshots(
2023 ServerName serverName) throws IOException;
2026 * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has
2027 * no quota information on that namespace.
2029 SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(String namespace) throws IOException;
2032 * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has
2033 * no quota information on that table.
2035 SpaceQuotaSnapshotView getCurrentSpaceQuotaSnapshot(TableName tableName) throws IOException;
2038 * Grant specific permissions to a user.
2039 * @param userName user name
2040 * @param permission the specific permission
2041 * @param mergeExistingPermissions If set to false, the later granted permissions will override
2042 * the previously granted permissions; otherwise, they will be merged with the previously
2043 * granted permissions.
2044 * @throws IOException if a remote or network exception occurs
2046 void grant(String userName, Permission permission, boolean mergeExistingPermissions)
2047 throws IOException;
2050 * Revoke specific permissions from a user.
2051 * @param userName user name
2052 * @param permission the specific permission
2053 * @throws IOException if a remote or network exception occurs
2055 void revoke(String userName, Permission permission) throws IOException;
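  /*
   * A sketch of granting and then revoking a table-level permission, assuming an Admin named
   * "admin" and the Permission.newBuilder(TableName) builder; the user, table and column family
   * are illustrative:
   *
   *   Permission perm = Permission.newBuilder(TableName.valueOf("t1"))
   *       .withFamily(Bytes.toBytes("cf1"))
   *       .withActions(Permission.Action.READ, Permission.Action.WRITE)
   *       .build();
   *   admin.grant("bob", perm, true);   // merge with bob's existing permissions
   *   admin.revoke("bob", perm);
   */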