/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Pair;
/**
 * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and
 * call {@link #close()} afterwards.
 * <p>Admin can be used to create, drop, list, enable and disable tables, add and drop table
 * column families and other administrative operations.
 *
 * @see ConnectionFactory
 */
@InterfaceAudience.Public
public interface Admin extends Abortable, Closeable {
  int getOperationTimeout();

  void abort(String why, Throwable e);

  /**
   * @return Connection used by this object.
   */
  Connection getConnection();

  /**
   * @param tableName Table to check.
   * @return True if table exists already.
   */
  boolean tableExists(final TableName tableName) throws IOException;
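
  // Illustrative usage sketch (not part of the interface): obtaining an Admin from a Connection
  // and checking for a table. The configuration object and table name are made-up examples.
  //
  //   try (Connection connection = ConnectionFactory.createConnection(conf);
  //        Admin admin = connection.getAdmin()) {
  //     boolean exists = admin.tableExists(TableName.valueOf("example_table"));
  //   }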

  /**
   * List all the userspace tables.
   *
   * @return an array of read-only HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors()}
   */
  @Deprecated
  HTableDescriptor[] listTables() throws IOException;

  /**
   * List all the userspace tables.
   *
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors() throws IOException;

  /**
   * List all the userspace tables matching the given pattern.
   *
   * @param pattern The compiled regular expression to match against
   * @return an array of read-only HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(java.util.regex.Pattern)}
   */
  @Deprecated
  HTableDescriptor[] listTables(Pattern pattern) throws IOException;

  /**
   * List all the userspace tables matching the given pattern.
   *
   * @param pattern The compiled regular expression to match against
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException;

  /**
   * List all the userspace tables matching the given regular expression.
   *
   * @param regex The regular expression to match against
   * @return an array of read-only HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTables(java.util.regex.Pattern)
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(java.lang.String)}
   */
  @Deprecated
  HTableDescriptor[] listTables(String regex) throws IOException;

  /**
   * List all the userspace tables matching the given regular expression.
   *
   * @param regex The regular expression to match against
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTables(java.util.regex.Pattern)
   */
  List<TableDescriptor> listTableDescriptors(String regex) throws IOException;

  /**
   * List all the tables matching the given pattern.
   *
   * @param pattern The compiled regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return an array of read-only HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(java.util.regex.Pattern, boolean)}
   */
  @Deprecated
  HTableDescriptor[] listTables(Pattern pattern, boolean includeSysTables) throws IOException;

  /**
   * List all the tables matching the given pattern.
   *
   * @param pattern The compiled regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
      throws IOException;

  /**
   * List all the tables matching the given pattern.
   *
   * @param regex The regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return an array of read-only HTableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTables(java.util.regex.Pattern, boolean)
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(java.lang.String, boolean)}
   */
  @Deprecated
  HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException;

  /**
   * List all the tables matching the given pattern.
   *
   * @param regex The regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @see #listTables(java.util.regex.Pattern, boolean)
   */
  List<TableDescriptor> listTableDescriptors(String regex, boolean includeSysTables)
      throws IOException;

  /**
   * List all of the names of userspace tables.
   *
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames() throws IOException;

  /**
   * List all of the names of userspace tables.
   *
   * @param pattern The regular expression to match against
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames(Pattern pattern) throws IOException;

  /**
   * List all of the names of userspace tables.
   *
   * @param regex The regular expression to match against
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames(String regex) throws IOException;

  /**
   * List all of the names of userspace tables.
   *
   * @param pattern The regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
      throws IOException;

  /**
   * List all of the names of userspace tables.
   *
   * @param regex The regular expression to match against
   * @param includeSysTables False to match only against userspace tables
   * @return TableName[] table names
   * @throws IOException if a remote or network exception occurs
   */
  TableName[] listTableNames(final String regex, final boolean includeSysTables)
      throws IOException;

  /**
   * Get the descriptor for a table.
   *
   * @param tableName as a {@link TableName}
   * @return the read-only tableDescriptor
   * @throws org.apache.hadoop.hbase.TableNotFoundException
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptor(TableName)}
   */
  @Deprecated
  HTableDescriptor getTableDescriptor(final TableName tableName)
      throws TableNotFoundException, IOException;

  /**
   * Get the descriptor for a table.
   *
   * @param tableName as a {@link TableName}
   * @return the tableDescriptor
   * @throws org.apache.hadoop.hbase.TableNotFoundException
   * @throws IOException if a remote or network exception occurs
   */
  TableDescriptor listTableDescriptor(final TableName tableName)
      throws TableNotFoundException, IOException;

  /**
   * Creates a new table. Synchronous operation.
   *
   * @param desc table descriptor for table
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #createTable(TableDescriptor)}
   */
  @Deprecated
  default void createTable(HTableDescriptor desc) throws IOException {
    createTable((TableDescriptor) desc);
  }

  /**
   * Creates a new table. Synchronous operation.
   *
   * @param desc table descriptor for table
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException if a remote or network exception occurs
   */
  void createTable(TableDescriptor desc) throws IOException;
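
  // Illustrative sketch: building a descriptor and creating a table. TableDescriptorBuilder and
  // ColumnFamilyDescriptorBuilder (and their addColumnFamily/of methods) are assumed builder
  // helpers in this client package; the table and family names are made up.
  //
  //   TableDescriptor exampleDesc = TableDescriptorBuilder
  //       .newBuilder(TableName.valueOf("example_table"))
  //       .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  //       .build();
  //   admin.createTable(exampleDesc);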

  /**
   * Creates a new table with the specified number of regions. The start key specified will become
   * the end key of the first region of the table, and the end key specified will become the start
   * key of the last region of the table (the first region has a null start key and the last
   * region has a null end key). BigInteger math will be used to divide the key range specified
   * into enough segments to make the required number of total regions. Synchronous operation.
   *
   * @param desc table descriptor for table
   * @param startKey beginning of key range
   * @param endKey end of key range
   * @param numRegions the total number of regions to create
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #createTable(TableDescriptor, byte[], byte[], int)}
   */
  @Deprecated
  default void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException {
    createTable((TableDescriptor) desc, startKey, endKey, numRegions);
  }

  /**
   * Creates a new table with the specified number of regions. The start key specified will become
   * the end key of the first region of the table, and the end key specified will become the start
   * key of the last region of the table (the first region has a null start key and the last
   * region has a null end key). BigInteger math will be used to divide the key range specified
   * into enough segments to make the required number of total regions. Synchronous operation.
   *
   * @param desc table descriptor for table
   * @param startKey beginning of key range
   * @param endKey end of key range
   * @param numRegions the total number of regions to create
   * @throws IllegalArgumentException if the table name is reserved
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException
   */
  void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions)
      throws IOException;

  /**
   * Creates a new table with an initial set of empty regions defined by the specified split keys.
   * The total number of regions created will be the number of split keys plus one. Synchronous
   * operation. Note: Avoid passing empty split keys.
   *
   * @param desc table descriptor for table
   * @param splitKeys array of split keys for the initial regions of the table
   * @throws IllegalArgumentException if the table name is reserved, if the split keys are
   *         repeated or if a split key has an empty byte array.
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #createTable(TableDescriptor, byte[][])}
   */
  @Deprecated
  default void createTable(final HTableDescriptor desc, byte[][] splitKeys) throws IOException {
    createTable((TableDescriptor) desc, splitKeys);
  }

  /**
   * Creates a new table with an initial set of empty regions defined by the specified split keys.
   * The total number of regions created will be the number of split keys plus one. Synchronous
   * operation. Note: Avoid passing empty split keys.
   *
   * @param desc table descriptor for table
   * @param splitKeys array of split keys for the initial regions of the table
   * @throws IllegalArgumentException if the table name is reserved, if the split keys are
   *         repeated or if a split key has an empty byte array.
   * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
   * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
   *         threads, the table may have been created between test-for-existence and
   *         attempt-at-creation).
   * @throws IOException
   */
  void createTable(final TableDescriptor desc, byte[][] splitKeys) throws IOException;

  /**
   * Creates a new table but does not block and wait for it to come online.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   * Throws IllegalArgumentException for a bad table name, if the split keys
   * are repeated or if a split key has an empty byte array.
   *
   * @param desc table descriptor for table
   * @param splitKeys keys to check if the table has been created with all split keys
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async creation. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #createTableAsync(TableDescriptor, byte[][])}
   */
  @Deprecated
  default Future<Void> createTableAsync(final HTableDescriptor desc, final byte[][] splitKeys)
      throws IOException {
    return createTableAsync((TableDescriptor) desc, splitKeys);
  }

  /**
   * Creates a new table but does not block and wait for it to come online.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   * Throws IllegalArgumentException for a bad table name, if the split keys
   * are repeated or if a split key has an empty byte array.
   *
   * @param desc table descriptor for table
   * @param splitKeys keys to check if the table has been created with all split keys
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async creation. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
      throws IOException;
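
  // Illustrative sketch: waiting on an asynchronous creation with a bounded timeout. Bytes and
  // TimeUnit are assumed to be imported; the split key and timeout are arbitrary examples.
  //
  //   byte[][] exampleSplits = { Bytes.toBytes("m") };
  //   Future<Void> createFuture = admin.createTableAsync(exampleDesc, exampleSplits);
  //   createFuture.get(5, TimeUnit.MINUTES); // may throw ExecutionException or TimeoutException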

  /**
   * Deletes a table. Synchronous operation.
   *
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   */
  void deleteTable(final TableName tableName) throws IOException;

  /**
   * Deletes the table but does not block and wait for it to be completely removed.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async delete. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> deleteTableAsync(TableName tableName) throws IOException;

  /**
   * Deletes tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.lang.String)}
   * and {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param regex The regular expression to match table names against
   * @return Table descriptors for tables that couldn't be deleted.
   *         The return htds are read-only
   * @throws IOException
   * @see #deleteTables(java.util.regex.Pattern)
   * @see #deleteTable(org.apache.hadoop.hbase.TableName)
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.lang.String)}
   *             and {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] deleteTables(String regex) throws IOException;

  /**
   * Delete tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.util.regex.Pattern)} and
   * {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param pattern The pattern to match table names against
   * @return Table descriptors for tables that couldn't be deleted.
   *         The return htds are read-only
   * @throws IOException
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.util.regex.Pattern)}
   *             and {@link #deleteTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] deleteTables(Pattern pattern) throws IOException;

  /**
   * Truncate a table. Synchronous operation.
   *
   * @param tableName name of table to truncate
   * @param preserveSplits True if the splits should be preserved
   * @throws IOException if a remote or network exception occurs
   */
  void truncateTable(final TableName tableName, final boolean preserveSplits) throws IOException;

  /**
   * Truncate the table but does not block and wait for it to be completely enabled. You can use
   * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
   * ExecutionException if there was an error while executing the operation or TimeoutException
   * in case the wait timeout was not long enough to allow the operation to complete.
   *
   * @param tableName name of table to truncate
   * @param preserveSplits true if the splits should be preserved
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete.
   */
  Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
      throws IOException;

  /**
   * Enable a table. May time out. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)}
   * and {@link #isTableEnabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be
   * in disabled state for it to be enabled.
   *
   * @param tableName name of the table
   * @throws IOException if a remote or network exception occurs. There could be a couple types
   *         of IOException: TableNotFoundException means the table doesn't exist;
   *         TableNotDisabledException means the table isn't in disabled state.
   * @see #isTableEnabled(org.apache.hadoop.hbase.TableName)
   * @see #disableTable(org.apache.hadoop.hbase.TableName)
   * @see #enableTableAsync(org.apache.hadoop.hbase.TableName)
   */
  void enableTable(final TableName tableName) throws IOException;

  /**
   * Enable the table but does not block and wait for it to be completely enabled.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table to enable
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async enable. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> enableTableAsync(final TableName tableName) throws IOException;

  /**
   * Enable tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.lang.String)} and
   * {@link #enableTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param regex The regular expression to match table names against
   * @throws IOException
   * @return Table descriptors for tables that couldn't be enabled.
   *         The return HTDs are read-only.
   * @see #enableTables(java.util.regex.Pattern)
   * @see #enableTable(org.apache.hadoop.hbase.TableName)
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.lang.String)}
   *             and {@link #enableTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] enableTables(String regex) throws IOException;

  /**
   * Enable tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.util.regex.Pattern)} and
   * {@link #enableTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param pattern The pattern to match table names against
   * @throws IOException
   * @return Table descriptors for tables that couldn't be enabled.
   *         The return HTDs are read-only.
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.util.regex.Pattern)}
   *             and {@link #enableTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] enableTables(Pattern pattern) throws IOException;

  /**
   * Disable the table but does not block and wait for it to be completely disabled.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table to disable
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async disable. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> disableTableAsync(final TableName tableName) throws IOException;

  /**
   * Disable table and wait on completion. May time out eventually. Use {@link
   * #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
   * {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
   * enabled state for it to be disabled.
   *
   * @param tableName name of the table
   * @throws IOException There could be a couple types of IOException: TableNotFoundException
   *         means the table doesn't exist; TableNotEnabledException means the table isn't in
   *         enabled state.
   */
  void disableTable(final TableName tableName) throws IOException;
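
  // Illustrative sketch: a common drop sequence is to disable a table before deleting it; treat
  // the ordering here as an example workflow rather than a stated requirement of this interface.
  //
  //   TableName exampleName = TableName.valueOf("example_table");
  //   if (admin.isTableEnabled(exampleName)) {
  //     admin.disableTable(exampleName);
  //   }
  //   admin.deleteTable(exampleName);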

  /**
   * Disable tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.lang.String)}
   * and {@link #disableTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param regex The regular expression to match table names against
   * @return Table descriptors for tables that couldn't be disabled.
   *         The return htds are read-only
   * @throws IOException
   * @see #disableTables(java.util.regex.Pattern)
   * @see #disableTable(org.apache.hadoop.hbase.TableName)
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.lang.String)}
   *             and {@link #disableTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] disableTables(String regex) throws IOException;

  /**
   * Disable tables matching the passed in pattern and wait on completion. Warning: Use this
   * method carefully, there is no prompting and the effect is immediate. Consider using
   * {@link #listTableDescriptors(java.util.regex.Pattern)} and
   * {@link #disableTable(org.apache.hadoop.hbase.TableName)}
   *
   * @param pattern The pattern to match table names against
   * @return Table descriptors for tables that couldn't be disabled.
   *         The return htds are read-only
   * @throws IOException
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             This is just a trivial helper method without any magic.
   *             Consider using {@link #listTableDescriptors(java.util.regex.Pattern)}
   *             and {@link #disableTable(org.apache.hadoop.hbase.TableName)}
   */
  @Deprecated
  HTableDescriptor[] disableTables(Pattern pattern) throws IOException;

  /**
   * @param tableName name of table to check
   * @return true if table is on-line
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableEnabled(TableName tableName) throws IOException;

  /**
   * @param tableName name of table to check
   * @return true if table is off-line
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableDisabled(TableName tableName) throws IOException;

  /**
   * @param tableName name of table to check
   * @return true if all regions of the table are available
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableAvailable(TableName tableName) throws IOException;

  /**
   * Use this API to check if the table has been created with the specified number of split keys
   * which were used while creating the given table. Note: If this API is used after a table's
   * region gets split, the API may return false.
   *
   * @param tableName name of table to check
   * @param splitKeys keys to check if the table has been created with all split keys
   * @throws IOException if a remote or network exception occurs
   */
  boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException;

  /**
   * Get the status of an alter command - indicates how many regions have received the updated
   * schema. Asynchronous operation.
   *
   * @param tableName TableName instance
   * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of
   *         regions yet to be updated; Pair.getSecond() is the total number of regions of the
   *         table
   * @throws IOException if a remote or network exception occurs
   */
  Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException;
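
  // Illustrative sketch: polling the alter status after a schema change. Pair.getFirst() is the
  // number of regions still to be updated and Pair.getSecond() the total region count.
  //
  //   Pair<Integer, Integer> alterStatus = admin.getAlterStatus(tableName);
  //   int pendingRegions = alterStatus.getFirst();
  //   int totalRegions = alterStatus.getSecond();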

  /**
   * Get the status of an alter command - indicates how many regions have received the updated
   * schema. Asynchronous operation.
   *
   * @param tableName name of the table to get the status of
   * @return Pair indicating the number of regions updated: Pair.getFirst() is the number of
   *         regions yet to be updated; Pair.getSecond() is the total number of regions of the
   *         table
   * @throws IOException if a remote or network exception occurs
   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #getAlterStatus(TableName)}
   */
  @Deprecated
  Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException;

  /**
   * Add a column family to an existing table. Asynchronous operation.
   *
   * @param tableName name of the table to add column family to
   * @param columnFamily column family descriptor of column family to be added
   * @throws IOException if a remote or network exception occurs
   * @deprecated As of release 2.0.0.
   *             (<a href="https://issues.apache.org/jira/browse/HBASE-1989">HBASE-1989</a>).
   *             This will be removed in HBase 3.0.0.
   *             Use {@link #addColumnFamily(TableName, ColumnFamilyDescriptor)}.
   */
  @Deprecated
  void addColumn(final TableName tableName, final HColumnDescriptor columnFamily)
      throws IOException;

  /**
   * Add a column family to an existing table.
   *
   * @param tableName name of the table to add column family to
   * @param columnFamily column family descriptor of column family to be added
   * @throws IOException if a remote or network exception occurs
   */
  void addColumnFamily(final TableName tableName, final ColumnFamilyDescriptor columnFamily)
      throws IOException;

  /**
   * Add a column family to an existing table. Asynchronous operation.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of the table to add column family to
   * @param columnFamily column family descriptor of column family to be added
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async add column family. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> addColumnFamilyAsync(final TableName tableName,
      final ColumnFamilyDescriptor columnFamily) throws IOException;

  /**
   * Delete a column family from a table. Asynchronous operation.
   *
   * @param tableName name of table
   * @param columnFamily name of column family to be deleted
   * @throws IOException if a remote or network exception occurs
   * @deprecated As of release 2.0.0.
   *             (<a href="https://issues.apache.org/jira/browse/HBASE-1989">HBASE-1989</a>).
   *             This will be removed in HBase 3.0.0.
   *             Use {@link #deleteColumnFamily(TableName, byte[])}.
   */
  @Deprecated
  void deleteColumn(final TableName tableName, final byte[] columnFamily) throws IOException;

  /**
   * Delete a column family from a table. Asynchronous operation.
   *
   * @param tableName name of table
   * @param columnFamily name of column family to be deleted
   * @throws IOException if a remote or network exception occurs
   */
  void deleteColumnFamily(final TableName tableName, final byte[] columnFamily)
      throws IOException;

  /**
   * Delete a column family from a table. Asynchronous operation.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table
   * @param columnFamily name of column family to be deleted
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async delete column family. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily)
      throws IOException;

  /**
   * Modify an existing column family on a table.
   *
   * @param tableName name of table
   * @param columnFamily new column family descriptor to use
   * @throws IOException if a remote or network exception occurs
   * @deprecated As of release 2.0.0.
   *             (<a href="https://issues.apache.org/jira/browse/HBASE-1989">HBASE-1989</a>).
   *             This will be removed in HBase 3.0.0.
   *             Use {@link #modifyColumnFamily(TableName, ColumnFamilyDescriptor)}.
   */
  @Deprecated
  void modifyColumn(final TableName tableName, final HColumnDescriptor columnFamily)
      throws IOException;

  /**
   * Modify an existing column family on a table.
   *
   * @param tableName name of table
   * @param columnFamily new column family descriptor to use
   * @throws IOException if a remote or network exception occurs
   */
  void modifyColumnFamily(final TableName tableName, final ColumnFamilyDescriptor columnFamily)
      throws IOException;

  /**
   * Modify an existing column family on a table. Asynchronous operation.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table
   * @param columnFamily new column family descriptor to use
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async modify column family. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor columnFamily)
      throws IOException;
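
  // Illustrative sketch: adding a new column family to an existing table.
  // ColumnFamilyDescriptorBuilder.of(...) is assumed to be the factory for a simple
  // ColumnFamilyDescriptor; the family name is a made-up example.
  //
  //   ColumnFamilyDescriptor newFamily = ColumnFamilyDescriptorBuilder.of("new_cf");
  //   admin.addColumnFamily(tableName, newFamily);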

  /**
   * Close a region. For expert-admins. Runs close on the regionserver. The master will not be
   * informed of the close.
   *
   * @param regionname region name to close
   * @param serverName If supplied, we'll use this location rather than the one currently in
   *          <code>hbase:meta</code>
   * @throws IOException if a remote or network exception occurs
   */
  void closeRegion(final String regionname, final String serverName) throws IOException;

  /**
   * Close a region. For expert-admins. Runs close on the regionserver. The master will not be
   * informed of the close.
   *
   * @param regionname region name to close
   * @param serverName The servername of the regionserver. If passed null we will use servername
   *          found in the hbase:meta table. A server name is made of host, port and startcode.
   *          Here is an example: <code> host187.example.com,60020,1289493121758</code>
   * @throws IOException if a remote or network exception occurs
   */
  void closeRegion(final byte[] regionname, final String serverName) throws IOException;

  /**
   * For expert-admins. Runs close on the regionserver. Closes a region based on the encoded
   * region name. The region server name is mandatory. If the servername is provided then based
   * on the online regions in the specified regionserver the specified region will be closed.
   * The master will not be informed of the close. Note that the regionname is the encoded
   * regionname.
   *
   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region
   *          name suffix: e.g. if regionname is
   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @param serverName The servername of the regionserver. A server name is made of host, port
   *          and startcode. This is mandatory. Here is an example:
   *          <code> host187.example.com,60020,1289493121758</code>
   * @return true if the region was closed, false if not.
   * @throws IOException if a remote or network exception occurs
   */
  boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
      final String serverName) throws IOException;

  /**
   * Close a region. For expert-admins. Runs close on the regionserver. The master will not be
   * informed of the close.
   *
   * @throws IOException
   */
  void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException;

  /**
   * Get all the online regions on a region server.
   */
  List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;

  /**
   * Flush a table. Synchronous operation.
   *
   * @param tableName table to flush
   * @throws IOException if a remote or network exception occurs
   */
  void flush(final TableName tableName) throws IOException;

  /**
   * Flush an individual region. Synchronous operation.
   *
   * @param regionName region to flush
   * @throws IOException if a remote or network exception occurs
   */
  void flushRegion(final byte[] regionName) throws IOException;

  /**
   * Compact a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @throws IOException if a remote or network exception occurs
   */
  void compact(final TableName tableName) throws IOException;

  /**
   * Compact an individual region. Asynchronous operation.
   *
   * @param regionName region to compact
   * @throws IOException if a remote or network exception occurs
   */
  void compactRegion(final byte[] regionName) throws IOException;

  /**
   * Compact a column family within a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @throws IOException if a remote or network exception occurs
   */
  void compact(final TableName tableName, final byte[] columnFamily) throws IOException;

  /**
   * Compact a column family within a region. Asynchronous operation.
   *
   * @param regionName region to compact
   * @param columnFamily column family within a region
   * @throws IOException if a remote or network exception occurs
   */
  void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException;

  /**
   * Major compact a table. Asynchronous operation.
   *
   * @param tableName table to major compact
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompact(TableName tableName) throws IOException;

  /**
   * Major compact an individual region. Asynchronous operation.
   *
   * @param regionName region to major compact
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompactRegion(final byte[] regionName) throws IOException;

  /**
   * Major compact a column family within a table. Asynchronous operation.
   *
   * @param tableName table to major compact
   * @param columnFamily column family within a table
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompact(TableName tableName, final byte[] columnFamily) throws IOException;

  /**
   * Major compact a column family within a region. Asynchronous operation.
   *
   * @param regionName region to major compact
   * @param columnFamily column family within a region
   * @throws IOException if a remote or network exception occurs
   */
  void majorCompactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException;
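
  // Illustrative sketch: requesting a major compaction of a single column family. The call only
  // schedules the compaction and returns before it finishes; Bytes is assumed to be imported.
  //
  //   admin.majorCompact(TableName.valueOf("example_table"), Bytes.toBytes("cf"));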

  /**
   * Compact all regions on the region server.
   *
   * @param sn the region server name
   * @param major if it's major compaction
   * @throws IOException
   * @throws InterruptedException
   */
  void compactRegionServer(final ServerName sn, boolean major)
      throws IOException, InterruptedException;

  /**
   * Move the region <code>r</code> to <code>dest</code>.
   *
   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region
   *          name suffix: e.g. if regionname is
   *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
   *          then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
   * @param destServerName The servername of the destination regionserver. If passed the empty
   *          byte array we'll assign to a random server. A server name is made of host, port
   *          and startcode. Here is an example:
   *          <code> host187.example.com,60020,1289493121758</code>
   * @throws IOException if we can't find a region named <code>encodedRegionName</code>
   */
  void move(final byte[] encodedRegionName, final byte[] destServerName) throws IOException;

  /**
   * Assign a region.
   *
   * @param regionName Region name to assign.
   */
  void assign(final byte[] regionName) throws IOException;

  /**
   * Unassign a region from current hosting regionserver. Region will then be assigned to a
   * regionserver chosen at random. Region could be reassigned back to the same server. Use
   * {@link #move(byte[], byte[])} if you want to control the region movement.
   *
   * @param regionName Region to unassign. Will clear any existing RegionPlan if one found.
   * @param force If true, force unassign (Will remove region from regions-in-transition too if
   *          present. If results in double assignment use hbck -fix to resolve. To be used by
   *          experts).
   */
  void unassign(final byte[] regionName, final boolean force) throws IOException;
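
  // Illustrative sketch: moving a region to a specific server using the encoded region name and
  // a servername of the form host,port,startcode (both values copied from the examples above).
  //
  //   admin.move(Bytes.toBytes("527db22f95c8a9e0116f0cc13c680396"),
  //       Bytes.toBytes("host187.example.com,60020,1289493121758"));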

  /**
   * Offline specified region from master's in-memory state. It will not attempt to reassign the
   * region as in unassign. This API can be used when a region is not served by any region server
   * and is still online as per Master's in-memory state. If this API is incorrectly used on an
   * active region then master will lose track of that region. This is a special method that
   * should be used by experts.
   *
   * @param regionName Region to offline.
   * @throws IOException
   */
  void offline(final byte[] regionName) throws IOException;

  /**
   * Turn the load balancer on or off.
   *
   * @param on If true, enable the balancer; if false, disable it.
   * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
   * @return Previous balancer value
   */
  boolean setBalancerRunning(final boolean on, final boolean synchronous) throws IOException;

  /**
   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do
   * the reassignments. Can NOT run for various reasons. Check logs.
   *
   * @return True if balancer ran, false otherwise.
   */
  boolean balancer() throws IOException;

  /**
   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do
   * the reassignments. If there is a region in transition, the force parameter of true would
   * still run the balancer. Can *not* run for other reasons. Check logs.
   *
   * @param force whether we should force balance even if there is a region in transition
   * @return True if balancer ran, false otherwise.
   */
  boolean balancer(boolean force) throws IOException;

  /**
   * Query the current state of the balancer.
   *
   * @return true if the balancer is enabled, false otherwise.
   */
  boolean isBalancerEnabled() throws IOException;
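
  // Illustrative sketch: temporarily disabling the balancer around manual region moves and then
  // restoring the previous setting.
  //
  //   boolean previouslyEnabled = admin.setBalancerRunning(false, true);
  //   try {
  //     // perform manual moves here
  //   } finally {
  //     admin.setBalancerRunning(previouslyEnabled, true);
  //   }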

  /**
   * Invoke region normalizer. Can NOT run for various reasons. Check logs.
   *
   * @return True if region normalizer ran, false otherwise.
   */
  boolean normalize() throws IOException;

  /**
   * Query the current state of the region normalizer.
   *
   * @return true if region normalizer is enabled, false otherwise.
   */
  boolean isNormalizerEnabled() throws IOException;

  /**
   * Turn region normalizer on or off.
   *
   * @return Previous normalizer value
   */
  boolean setNormalizerRunning(final boolean on) throws IOException;

  /**
   * Enable/Disable the catalog janitor.
   *
   * @param enable if true enables the catalog janitor
   * @return the previous state
   */
  boolean enableCatalogJanitor(boolean enable) throws IOException;

  /**
   * Ask for a scan of the catalog table.
   *
   * @return the number of entries cleaned
   */
  int runCatalogScan() throws IOException;

  /**
   * Query on the catalog janitor state (Enabled/Disabled?).
   */
  boolean isCatalogJanitorEnabled() throws IOException;

  /**
   * Enable/Disable the cleaner chore.
   *
   * @param on if true enables the cleaner chore
   * @return the previous state
   * @throws IOException
   */
  boolean setCleanerChoreRunning(final boolean on) throws IOException;

  /**
   * Ask for the cleaner chore to run.
   *
   * @return True if cleaner chore ran, false otherwise
   * @throws IOException
   */
  boolean runCleanerChore() throws IOException;

  /**
   * Query on the cleaner chore state (Enabled/Disabled?).
   *
   * @throws IOException
   */
  boolean isCleanerChoreEnabled() throws IOException;

  /**
   * Merge two regions. Asynchronous operation.
   *
   * @param nameOfRegionA encoded or full name of region a
   * @param nameOfRegionB encoded or full name of region b
   * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent
   *          regions
   * @throws IOException
   * @deprecated Since 2.0. Will be removed in 3.0. Use
   *             {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead.
   */
  @Deprecated
  void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB,
      final boolean forcible) throws IOException;

  /**
   * Merge two regions. Asynchronous operation.
   *
   * @param nameOfRegionA encoded or full name of region a
   * @param nameOfRegionB encoded or full name of region b
   * @param forcible true if do a compulsory merge, otherwise we will only merge
   *          two adjacent regions
   * @throws IOException
   */
  Future<Void> mergeRegionsAsync(
      final byte[] nameOfRegionA,
      final byte[] nameOfRegionB,
      final boolean forcible) throws IOException;

  /**
   * Merge regions. Asynchronous operation.
   *
   * @param nameofRegionsToMerge encoded or full name of daughter regions
   * @param forcible true if do a compulsory merge, otherwise we will only merge
   *          adjacent regions
   * @throws IOException
   */
  Future<Void> mergeRegionsAsync(
      final byte[][] nameofRegionsToMerge,
      final boolean forcible) throws IOException;

  /**
   * Split a table. Asynchronous operation.
   *
   * @param tableName table to split
   * @throws IOException if a remote or network exception occurs
   */
  void split(final TableName tableName) throws IOException;

  /**
   * Split an individual region. Asynchronous operation.
   *
   * @param regionName region to split
   * @throws IOException if a remote or network exception occurs
   * @deprecated Since 2.0. Will be removed in 3.0. Use
   *             {@link #splitRegionAsync(byte[], byte[])} instead.
   */
  @Deprecated
  void splitRegion(final byte[] regionName) throws IOException;

  /**
   * Split a table. Asynchronous operation.
   *
   * @param tableName table to split
   * @param splitPoint the explicit position to split on
   * @throws IOException if a remote or network exception occurs
   */
  void split(final TableName tableName, final byte[] splitPoint) throws IOException;

  /**
   * Split an individual region. Asynchronous operation.
   *
   * @param regionName region to split
   * @param splitPoint the explicit position to split on
   * @throws IOException if a remote or network exception occurs
   * @deprecated Since 2.0. Will be removed in 3.0. Use
   *             {@link #splitRegionAsync(byte[], byte[])} instead.
   */
  @Deprecated
  void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException;

  /**
   * Split an individual region. Asynchronous operation.
   *
   * @param regionName region to split
   * @param splitPoint the explicit position to split on
   * @throws IOException if a remote or network exception occurs
   */
  Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException;
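
  // Illustrative sketch: splitting a table at an explicit row key and later merging two regions.
  // The split point and region name variables are made-up examples.
  //
  //   admin.split(TableName.valueOf("example_table"), Bytes.toBytes("row-5000"));
  //   Future<Void> mergeFuture =
  //       admin.mergeRegionsAsync(encodedNameOfRegionA, encodedNameOfRegionB, false);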

  /**
   * Modify an existing table, more IRB friendly version.
   *
   * @param tableName name of table.
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #modifyTable(TableDescriptor)}
   */
  @Deprecated
  void modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException;

  /**
   * Modify an existing table, more IRB friendly version.
   *
   * @param td modified description of the table
   * @throws IOException if a remote or network exception occurs
   */
  void modifyTable(final TableDescriptor td) throws IOException;

  /**
   * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that
   * it may be a while before your schema change is updated across all of the table.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param tableName name of table.
   * @param htd modified description of the table
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #modifyTableAsync(TableDescriptor)}
   */
  @Deprecated
  Future<Void> modifyTableAsync(final TableName tableName, final HTableDescriptor htd)
      throws IOException;

  /**
   * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that
   * it may be a while before your schema change is updated across all of the table.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param td description of the table
   * @throws IOException if a remote or network exception occurs
   * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on
   *         the operation to complete
   */
  Future<Void> modifyTableAsync(TableDescriptor td) throws IOException;

  /**
   * Shuts down the HBase cluster.
   *
   * @throws IOException if a remote or network exception occurs
   */
  void shutdown() throws IOException;

  /**
   * Shuts down the current HBase master only. Does not shutdown the cluster.
   *
   * @throws IOException if a remote or network exception occurs
   */
  void stopMaster() throws IOException;

  /**
   * Check whether Master is in maintenance mode.
   *
   * @throws IOException if a remote or network exception occurs
   */
  boolean isMasterInMaintenanceMode() throws IOException;

  /**
   * Stop the designated regionserver.
   *
   * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
   *          <code>example.org:1234</code>
   * @throws IOException if a remote or network exception occurs
   */
  void stopRegionServer(final String hostnamePort) throws IOException;

  /**
   * @return cluster status
   * @throws IOException if a remote or network exception occurs
   */
  ClusterStatus getClusterStatus() throws IOException;

  /**
   * Get {@link RegionLoad} of all regions hosted on a regionserver.
   *
   * @param sn region server from which regionload is required.
   * @return region load map of all regions hosted on a region server
   * @throws IOException if a remote or network exception occurs
   */
  Map<byte[], RegionLoad> getRegionLoad(ServerName sn) throws IOException;

  /**
   * Get {@link RegionLoad} of all regions hosted on a regionserver for a table.
   *
   * @param sn region server from which regionload is required.
   * @param tableName get region load of regions belonging to the table
   * @return region load map of all regions of a table hosted on a region server
   * @throws IOException if a remote or network exception occurs
   */
  Map<byte[], RegionLoad> getRegionLoad(ServerName sn, TableName tableName) throws IOException;
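
  // Illustrative sketch: summing the store file size reported for one table's regions on a
  // region server. getStorefileSizeMB() is assumed to be one of the RegionLoad accessors.
  //
  //   long totalStorefileMb = 0;
  //   for (RegionLoad load : admin.getRegionLoad(serverName, tableName).values()) {
  //     totalStorefileMb += load.getStorefileSizeMB();
  //   }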

  /**
   * @return Configuration used by the instance.
   */
  Configuration getConfiguration();

  /**
   * Create a new namespace. Blocks until namespace has been successfully created or an exception
   * is thrown.
   *
   * @param descriptor descriptor which describes the new namespace
   */
  void createNamespace(final NamespaceDescriptor descriptor) throws IOException;

  /**
   * Create a new namespace.
   *
   * @param descriptor descriptor which describes the new namespace
   * @return the result of the async create namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> createNamespaceAsync(final NamespaceDescriptor descriptor) throws IOException;
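
  // Illustrative sketch: creating a namespace and naming a table inside it. The
  // NamespaceDescriptor.create(...).build() builder and the names used are assumptions.
  //
  //   admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
  //   TableName namespacedTable = TableName.valueOf("example_ns", "example_table");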

  /**
   * Modify an existing namespace. Blocks until namespace has been successfully modified or an
   * exception is thrown.
   *
   * @param descriptor descriptor which describes the new namespace
   */
  void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException;

  /**
   * Modify an existing namespace.
   *
   * @param descriptor descriptor which describes the new namespace
   * @return the result of the async modify namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> modifyNamespaceAsync(final NamespaceDescriptor descriptor) throws IOException;

  /**
   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
   * Blocks until namespace has been successfully deleted or an exception is thrown.
   *
   * @param name namespace name
   */
  void deleteNamespace(final String name) throws IOException;

  /**
   * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
   *
   * @param name namespace name
   * @return the result of the async delete namespace operation. Use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> deleteNamespaceAsync(final String name) throws IOException;

  /**
   * Get a namespace descriptor by name.
   *
   * @param name name of namespace descriptor
   * @return A descriptor
   * @throws org.apache.hadoop.hbase.NamespaceNotFoundException
   * @throws IOException if a remote or network exception occurs
   */
  NamespaceDescriptor getNamespaceDescriptor(final String name)
      throws NamespaceNotFoundException, IOException;

  /**
   * List available namespace descriptors.
   *
   * @return List of descriptors
   */
  NamespaceDescriptor[] listNamespaceDescriptors() throws IOException;

  /**
   * Get list of table descriptors by namespace.
   *
   * @param name namespace name
   * @return HTD[] the read-only tableDescriptors
   * @throws IOException
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptorsByNamespace(byte[])}
   */
  @Deprecated
  HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException;

  /**
   * Get list of table descriptors by namespace.
   *
   * @param name namespace name
   * @return returns a list of TableDescriptors
   * @throws IOException
   */
  List<TableDescriptor> listTableDescriptorsByNamespace(final byte[] name) throws IOException;

  /**
   * Get list of table names by namespace.
   *
   * @param name namespace name
   * @return The list of table names in the namespace
   * @throws IOException
   */
  TableName[] listTableNamesByNamespace(final String name) throws IOException;

  /**
   * Get the regions of a given table.
   *
   * @param tableName the name of the table
   * @return List of {@link HRegionInfo}.
   * @throws IOException
   */
  List<HRegionInfo> getTableRegions(final TableName tableName) throws IOException;

  @Override
  void close() throws IOException;
  /**
   * Get tableDescriptors.
   *
   * @param tableNames List of table names
   * @return HTD[] the read-only tableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(List)}
   */
  @Deprecated
  HTableDescriptor[] getTableDescriptorsByTableName(List<TableName> tableNames) throws IOException;

  /**
   * Get tableDescriptors.
   *
   * @param tableNames List of table names
   * @return returns a list of TableDescriptors
   * @throws IOException if a remote or network exception occurs
   */
  List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException;

  /**
   * Get tableDescriptors.
   *
   * @param names List of table names
   * @return HTD[] the read-only tableDescriptors
   * @throws IOException if a remote or network exception occurs
   * @deprecated since 2.0 version and will be removed in 3.0 version.
   *             use {@link #listTableDescriptors(List)}
   */
  @Deprecated
  HTableDescriptor[] getTableDescriptors(List<String> names) throws IOException;
  /**
   * Abort a procedure.
   *
   * @param procId ID of the procedure to abort
   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
   * @return true if aborted, false if procedure already completed or does not exist
   * @throws IOException if a remote or network exception occurs
   */
  boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
      throws IOException;

  /**
   * Abort a procedure but do not block and wait for it to be completely removed.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param procId ID of the procedure to abort
   * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
   * @return true if aborted, false if procedure already completed or does not exist
   * @throws IOException if a remote or network exception occurs
   */
  Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
      throws IOException;
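  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): request an
   * asynchronous abort of a procedure and wait a bounded amount of time for the outcome.
   * Assumes an Admin named "admin" and a known procedure id "procId".
   *
   *   Future<Boolean> abort = admin.abortProcedureAsync(procId, true);
   *   boolean aborted = abort.get(30, TimeUnit.SECONDS);
   *   if (!aborted) {
   *     // the procedure had already completed or was unknown to the master
   *   }
   */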
  /**
   * List procedures.
   *
   * @return procedure list
   * @throws IOException if a remote or network exception occurs
   */
  ProcedureInfo[] listProcedures() throws IOException;

  /**
   * List locks.
   *
   * @return lock list
   * @throws IOException if a remote or network exception occurs
   */
  LockInfo[] listLocks() throws IOException;
  /**
   * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
   *
   * Note that the actual rolling of the log writer is asynchronous and may not be complete when
   * this method returns. As a side effect of this call, the named region server may schedule
   * store flushes at the request of the wal.
   *
   * @param serverName The servername of the regionserver.
   * @throws IOException if a remote or network exception occurs
   * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException if closing the
   *         current WAL fails
   */
  void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;

  /**
   * Helper that delegates to getClusterStatus().getMasterCoprocessors().
   *
   * @return an array of master coprocessors
   * @throws IOException if a remote or network exception occurs
   * @see org.apache.hadoop.hbase.ClusterStatus#getMasterCoprocessors()
   */
  String[] getMasterCoprocessors() throws IOException;
  /**
   * Get the current compaction state of a table. It could be in a major compaction, a minor
   * compaction, both, or none.
   *
   * @param tableName table to examine
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionState(final TableName tableName) throws IOException;

  /**
   * Get the current compaction state of a region. It could be in a major compaction, a minor
   * compaction, both, or none.
   *
   * @param regionName region to examine
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionStateForRegion(final byte[] regionName) throws IOException;
  /**
   * Get the timestamp of the last major compaction for the passed table.
   *
   * The timestamp of the oldest HFile resulting from a major compaction of that table,
   * or 0 if no such HFile could be found.
   *
   * @param tableName table to examine
   * @return the last major compaction timestamp or 0
   * @throws IOException if a remote or network exception occurs
   */
  long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException;

  /**
   * Get the timestamp of the last major compaction for the passed region.
   *
   * The timestamp of the oldest HFile resulting from a major compaction of that region,
   * or 0 if no such HFile could be found.
   *
   * @param regionName region to examine
   * @return the last major compaction timestamp or 0
   * @throws IOException if a remote or network exception occurs
   */
  long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException;
  /**
   * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
   * taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
   * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
   * a different type or with different parameters) will fail with a {@link
   * org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate naming.
   * Snapshot names follow the same naming constraints as tables in HBase. See {@link
   * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   *
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void snapshot(final String snapshotName, final TableName tableName)
      throws IOException, SnapshotCreationException, IllegalArgumentException;
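  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): take a named
   * snapshot of an online table; the snapshot and table names are placeholders. Assumes an
   * already-open Connection named "conn".
   *
   *   try (Admin admin = conn.getAdmin()) {
   *     admin.snapshot("orders_backup_20240101", TableName.valueOf("orders"));
   *   }
   */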
  /**
   * Create a timestamp consistent snapshot for the given table. Snapshots are considered unique
   * based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
   * different type or with different parameters) will fail with a {@link SnapshotCreationException}
   * indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in
   * HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   *
   * @param snapshotName name of the snapshot to be created
   * @param tableName name of the table for which snapshot is created
   * @throws IOException if a remote or network exception occurs
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void snapshot(final byte[] snapshotName, final TableName tableName)
      throws IOException, SnapshotCreationException, IllegalArgumentException;
  /**
   * Create typed snapshot of the table. Snapshots are considered unique based on <b>the name of the
   * snapshot</b>. Attempts to take a snapshot with the same name (even a different type or with
   * different parameters) will fail with a {@link SnapshotCreationException} indicating the
   * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See
   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
   *
   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
   *          snapshots stored on the cluster
   * @param tableName name of the table to snapshot
   * @param type type of snapshot to take
   * @throws IOException if we fail to reach the master
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void snapshot(final String snapshotName, final TableName tableName, SnapshotType type)
      throws IOException, SnapshotCreationException, IllegalArgumentException;
  /**
   * Take a snapshot and wait for the server to complete that snapshot (blocking). Only a single
   * snapshot should be taken at a time for an instance of HBase, or results may be undefined (you
   * can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a
   * single cluster). Snapshots are considered unique based on <b>the name of the snapshot</b>.
   * Attempts to take a snapshot with the same name (even a different type or with different
   * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming.
   * Snapshot names follow the same naming constraints as tables in HBase. See {@link
   * org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. You should probably
   * use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} or
   * {@link #snapshot(byte[], org.apache.hadoop.hbase.TableName)} unless you are sure about the type
   * of snapshot that you want to take.
   *
   * @param snapshot snapshot to take
   * @throws IOException if a remote or network exception occurs or we lose contact with the master
   * @throws SnapshotCreationException if snapshot failed to be taken
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void snapshot(SnapshotDescription snapshot)
      throws IOException, SnapshotCreationException, IllegalArgumentException;
  /**
   * Take a snapshot without waiting for the server to complete that snapshot (asynchronous). Only a
   * single snapshot should be taken at a time, or results may be undefined.
   *
   * @param snapshot snapshot to take
   * @throws IOException if the snapshot did not succeed or we lose contact with the master.
   * @throws SnapshotCreationException if snapshot creation failed
   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
   */
  void takeSnapshotAsync(SnapshotDescription snapshot)
      throws IOException, SnapshotCreationException;

  /**
   * Check the current state of the passed snapshot. There are three possible states: <ol>
   * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
   * <li>finished with error - throws the exception that caused the snapshot to fail</li> </ol> The
   * cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
   * run/started since the snapshot you are checking, you will receive an {@link
   * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
   *
   * @param snapshot description of the snapshot to check
   * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
   *         running
   * @throws IOException if we have a network issue
   * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed
   * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is
   *         unknown
   */
  boolean isSnapshotFinished(final SnapshotDescription snapshot)
      throws IOException, HBaseSnapshotException, UnknownSnapshotException;
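  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): kick off an
   * asynchronous snapshot and poll for completion. The SnapshotDescription constructor shown
   * here is an assumption about that class; only the Admin calls come from this interface.
   * Assumes an Admin named "admin".
   *
   *   SnapshotDescription desc =
   *       new SnapshotDescription("orders_async_backup", TableName.valueOf("orders"));
   *   admin.takeSnapshotAsync(desc);
   *   while (!admin.isSnapshotFinished(desc)) {
   *     Thread.sleep(500); // back off between polls
   *   }
   */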
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If the
   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
   * snapshot of the current table is taken before executing the restore operation. In case of
   * restore failure, the failsafe snapshot will be restored. If the restore completes without
   * problem the failsafe snapshot is deleted.
   *
   * @param snapshotName name of the snapshot to restore
   * @throws IOException if a remote or network exception occurs
   * @throws org.apache.hadoop.hbase.snapshot.RestoreSnapshotException if snapshot failed to be
   *         restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(final byte[] snapshotName) throws IOException, RestoreSnapshotException;

  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If the
   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
   * snapshot of the current table is taken before executing the restore operation. In case of
   * restore failure, the failsafe snapshot will be restored. If the restore completes without
   * problem the failsafe snapshot is deleted.
   *
   * @param snapshotName name of the snapshot to restore
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(final String snapshotName) throws IOException, RestoreSnapshotException;
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If the
   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
   * snapshot of the current table is taken before executing the restore operation. In case of
   * restore failure, the failsafe snapshot will be restored. If the restore completes without
   * problem the failsafe snapshot is deleted.
   *
   * @param snapshotName name of the snapshot to restore
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @return the result of the async restore snapshot. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> restoreSnapshotAsync(final String snapshotName)
      throws IOException, RestoreSnapshotException;
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If
   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
   * failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   *
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
      throws IOException, RestoreSnapshotException;

  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If
   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
   * failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   *
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot)
      throws IOException, RestoreSnapshotException;
  /**
   * Restore the specified snapshot on the original table. (The table must be disabled.) If
   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
   * failsafe snapshot name is configurable by using the property
   * "hbase.snapshot.restore.failsafe.name".
   *
   * @param snapshotName name of the snapshot to restore
   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
   * @param restoreAcl true to restore acl of snapshot
   * @throws IOException if a remote or network exception occurs
   * @throws RestoreSnapshotException if snapshot failed to be restored
   * @throws IllegalArgumentException if the restore request is formatted incorrectly
   */
  void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
      final boolean restoreAcl) throws IOException, RestoreSnapshotException;
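  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): restore a
   * table from a snapshot with a failsafe snapshot taken first. disableTable/enableTable are
   * assumed to be available elsewhere on Admin; the names used are placeholders. Assumes an
   * Admin named "admin".
   *
   *   TableName orders = TableName.valueOf("orders");
   *   admin.disableTable(orders);                          // the table must be disabled
   *   admin.restoreSnapshot("orders_backup_20240101", true);
   *   admin.enableTable(orders);
   */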
  /**
   * Create a new table by cloning the snapshot content.
   *
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
      throws IOException, TableExistsException, RestoreSnapshotException;

  /**
   * Create a new table by cloning the snapshot content.
   *
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @param restoreAcl true to clone acl into newly created table
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  void cloneSnapshot(final String snapshotName, final TableName tableName,
      final boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException;

  /**
   * Create a new table by cloning the snapshot content.
   *
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be created already exists
   * @throws RestoreSnapshotException if snapshot failed to be cloned
   * @throws IllegalArgumentException if the specified table does not have a valid name
   */
  void cloneSnapshot(final String snapshotName, final TableName tableName)
      throws IOException, TableExistsException, RestoreSnapshotException;
  /**
   * Create a new table by cloning the snapshot content, but does not block
   * and wait for it to be completely cloned.
   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
   * It may throw ExecutionException if there was an error while executing the operation
   * or TimeoutException in case the wait timeout was not long enough to allow the
   * operation to complete.
   *
   * @param snapshotName name of the snapshot to be cloned
   * @param tableName name of the table where the snapshot will be restored
   * @throws IOException if a remote or network exception occurs
   * @throws TableExistsException if table to be cloned already exists
   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit)
   *         to wait on the operation to complete.
   */
  Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
      throws IOException, TableExistsException;
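  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): clone a
   * snapshot into a brand-new table and wait up to five minutes for the clone to finish.
   * Assumes an Admin named "admin"; the snapshot and table names are placeholders.
   *
   *   Future<Void> clone =
   *       admin.cloneSnapshotAsync("orders_backup_20240101", TableName.valueOf("orders_copy"));
   *   clone.get(5, TimeUnit.MINUTES); // throws TimeoutException if the clone is still running
   */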
  /**
   * Execute a distributed procedure on a cluster.
   *
   * @param signature A distributed procedure is uniquely identified by its signature (default the
   *          root ZK node name of the procedure).
   * @param instance The instance name of the procedure. For some procedures, this parameter is
   *          optional.
   * @param props Property/Value pairs of properties passing to the procedure
   * @throws IOException if a remote or network exception occurs
   */
  void execProcedure(String signature, String instance, Map<String, String> props)
      throws IOException;

  /**
   * Execute a distributed procedure on a cluster.
   *
   * @param signature A distributed procedure is uniquely identified by its signature (default the
   *          root ZK node name of the procedure).
   * @param instance The instance name of the procedure. For some procedures, this parameter is
   *          optional.
   * @param props Property/Value pairs of properties passing to the procedure
   * @return data returned after procedure execution. null if no return data.
   * @throws IOException if a remote or network exception occurs
   */
  byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props)
      throws IOException;
  /**
   * Check the current state of the specified procedure. There are three possible states: <ol>
   * <li>running - returns <tt>false</tt></li> <li>finished - returns <tt>true</tt></li>
   * <li>finished with error - throws the exception that caused the procedure to fail</li> </ol>
   *
   * @param signature The signature that uniquely identifies a procedure
   * @param instance The instance name of the procedure
   * @param props Property/Value pairs of properties passing to the procedure
   * @return true if the specified procedure is finished successfully, false if it is still running
   * @throws IOException if the specified procedure finished with error
   */
  boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
      throws IOException;
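  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): run a
   * distributed procedure and poll for completion. The signature "flush-table-proc" is an
   * assumption (the conventional table-flush procedure name); substitute whatever signature
   * your procedure registers. Assumes an Admin named "admin".
   *
   *   Map<String, String> props = new HashMap<>();
   *   admin.execProcedure("flush-table-proc", "orders", props);
   *   while (!admin.isProcedureFinished("flush-table-proc", "orders", props)) {
   *     Thread.sleep(500);
   *   }
   */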
  /**
   * List completed snapshots.
   *
   * @return a list of snapshot descriptors for completed snapshots
   * @throws IOException if a network error occurs
   */
  List<SnapshotDescription> listSnapshots() throws IOException;

  /**
   * List all the completed snapshots matching the given regular expression.
   *
   * @param regex The regular expression to match against
   * @return a list of SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listSnapshots(String regex) throws IOException;

  /**
   * List all the completed snapshots matching the given pattern.
   *
   * @param pattern The compiled regular expression to match against
   * @return a list of SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
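  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): print the
   * names of all completed snapshots whose names start with "orders_". Assumes an Admin named
   * "admin".
   *
   *   for (SnapshotDescription snap : admin.listSnapshots(Pattern.compile("orders_.*"))) {
   *     System.out.println(snap.getName());
   *   }
   */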
  /**
   * List all the completed snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   *
   * @param tableNameRegex The table name regular expression to match against
   * @param snapshotNameRegex The snapshot name regular expression to match against
   * @return a list of completed SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
      String snapshotNameRegex) throws IOException;

  /**
   * List all the completed snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   *
   * @param tableNamePattern The compiled table name regular expression to match against
   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
   * @return a list of completed SnapshotDescription
   * @throws IOException if a remote or network exception occurs
   */
  List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
      Pattern snapshotNamePattern) throws IOException;
  /**
   * Delete an existing snapshot.
   *
   * @param snapshotName name of the snapshot
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshot(final byte[] snapshotName) throws IOException;

  /**
   * Delete an existing snapshot.
   *
   * @param snapshotName name of the snapshot
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshot(final String snapshotName) throws IOException;

  /**
   * Delete existing snapshots whose names match the pattern passed.
   *
   * @param regex The regular expression to match against
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshots(final String regex) throws IOException;

  /**
   * Delete existing snapshots whose names match the pattern passed.
   *
   * @param pattern pattern for names of the snapshot to match
   * @throws IOException if a remote or network exception occurs
   */
  void deleteSnapshots(final Pattern pattern) throws IOException;
  /**
   * Delete all existing snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   *
   * @param tableNameRegex The table name regular expression to match against
   * @param snapshotNameRegex The snapshot name regular expression to match against
   * @throws IOException if a remote or network exception occurs
   */
  void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) throws IOException;

  /**
   * Delete all existing snapshots matching the given table name regular expression and snapshot
   * name regular expression.
   *
   * @param tableNamePattern The compiled table name regular expression to match against
   * @param snapshotNamePattern The compiled snapshot name regular expression to match against
   * @throws IOException if a remote or network exception occurs
   */
  void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern)
      throws IOException;
  /**
   * Apply the new quota settings.
   *
   * @param quota the quota settings
   * @throws IOException if a remote or network exception occurs
   */
  void setQuota(final QuotaSettings quota) throws IOException;

  /**
   * Return a QuotaRetriever to list the quotas based on the filter.
   *
   * @param filter the quota settings filter
   * @return the quota retriever
   * @throws IOException if a remote or network exception occurs
   */
  QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException;
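  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): throttle a
   * user to 100 requests per second, then read the quotas back. QuotaSettingsFactory,
   * ThrottleType and the iteration over QuotaRetriever come from the quotas package and are
   * used here on the assumption that their usual signatures apply. Assumes an Admin named
   * "admin".
   *
   *   admin.setQuota(QuotaSettingsFactory.throttleUser(
   *       "bob", ThrottleType.REQUEST_NUMBER, 100, TimeUnit.SECONDS));
   *   try (QuotaRetriever quotas =
   *       admin.getQuotaRetriever(new QuotaFilter().setUserFilter("bob"))) {
   *     for (QuotaSettings q : quotas) {
   *       System.out.println(q);
   *     }
   *   }
   */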
  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active
   * master. <p> The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access
   * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service
   * invocations: </p> <div style="background-color: #cccccc; padding: 2px"> <blockquote><pre>
   * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @return A MasterCoprocessorRpcChannel instance
   */
  CoprocessorRpcChannel coprocessorService();

  /**
   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
   * connected to the passed region server.
   *
   * <p>
   * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
   * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
   * </p>
   *
   * <div style="background-color: #cccccc; padding: 2px"> <blockquote><pre>
   * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
   * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
   * MyCallRequest request = MyCallRequest.newBuilder()
   *     ...
   *     .build();
   * MyCallResponse response = service.myCall(null, request);
   * </pre></blockquote></div>
   *
   * @param sn the server name to which the endpoint call is made
   * @return A RegionServerCoprocessorRpcChannel instance
   */
  CoprocessorRpcChannel coprocessorService(ServerName sn);
  /**
   * Update the configuration and trigger an online config change
   * on the regionserver.
   *
   * @param server The server whose config needs to be updated.
   * @throws IOException if a remote or network exception occurs
   */
  void updateConfiguration(ServerName server) throws IOException;

  /**
   * Update the configuration and trigger an online config change
   * on all the regionservers.
   *
   * @throws IOException if a remote or network exception occurs
   */
  void updateConfiguration() throws IOException;

  /**
   * Get the info port of the current master if one is available.
   *
   * @return master info port
   * @throws IOException if a remote or network exception occurs
   */
  int getMasterInfoPort() throws IOException;
  /**
   * Compact a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void compact(final TableName tableName, CompactType compactType)
      throws IOException, InterruptedException;

  /**
   * Compact a column family within a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if not a mob column family or if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void compact(final TableName tableName, final byte[] columnFamily, CompactType compactType)
      throws IOException, InterruptedException;

  /**
   * Major compact a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void majorCompact(final TableName tableName, CompactType compactType)
      throws IOException, InterruptedException;

  /**
   * Major compact a column family within a table. Asynchronous operation.
   *
   * @param tableName table to compact
   * @param columnFamily column family within a table
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @throws IOException if not a mob column family or if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void majorCompact(final TableName tableName, final byte[] columnFamily, CompactType compactType)
      throws IOException, InterruptedException;
  /**
   * Get the current compaction state of a table. It could be in a compaction, or none.
   *
   * @param tableName table to examine
   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
   * @return the current compaction state
   * @throws IOException if a remote or network exception occurs
   */
  CompactionState getCompactionState(final TableName tableName,
      CompactType compactType) throws IOException;
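  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): request a
   * major compaction of the MOB data of one column family and check whether a compaction is
   * still reported for the table. Table and family names are placeholders; Bytes is the usual
   * org.apache.hadoop.hbase.util.Bytes helper. Assumes an Admin named "admin".
   *
   *   TableName orders = TableName.valueOf("orders");
   *   admin.majorCompact(orders, Bytes.toBytes("attachments"), CompactType.MOB);
   *   if (admin.getCompactionState(orders, CompactType.MOB) != CompactionState.NONE) {
   *     // a compaction is still in progress
   *   }
   */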
  /**
   * Return the set of supported security capabilities.
   *
   * @return the list of supported security capabilities
   * @throws IOException if a remote or network exception occurs
   * @throws UnsupportedOperationException
   */
  List<SecurityCapability> getSecurityCapabilities() throws IOException;
  /**
   * Turn the Split or Merge switches on or off.
   *
   * @param enabled enabled or not
   * @param synchronous If true, waits until any outstanding split() call returns.
   * @param switchTypes switchType list {@link MasterSwitchType}
   * @return Previous switch value array
   * @throws IOException if a remote or network exception occurs
   */
  boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
      final MasterSwitchType... switchTypes) throws IOException;

  /**
   * Query the current state of the switch.
   *
   * @param switchType the {@link MasterSwitchType} to query
   * @return true if the switch is enabled, false otherwise.
   * @throws IOException if a remote or network exception occurs
   */
  boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
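  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): disable
   * region splitting cluster-wide (for example around a bulk load) and verify the switch
   * state afterwards. Assumes an Admin named "admin".
   *
   *   boolean[] previous = admin.setSplitOrMergeEnabled(false, true, MasterSwitchType.SPLIT);
   *   boolean splitsEnabled = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
   *   // ... perform the bulk load, then restore the switch:
   *   admin.setSplitOrMergeEnabled(previous[0], true, MasterSwitchType.SPLIT);
   */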
  /**
   * Add a new replication peer for replicating data to slave cluster.
   *
   * @param peerId a short name that identifies the peer
   * @param peerConfig configuration for the replication slave cluster
   * @throws IOException if a remote or network exception occurs
   */
  default void addReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
      throws IOException {
  }

  /**
   * Remove a peer and stop the replication.
   *
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void removeReplicationPeer(final String peerId) throws IOException {
  }

  /**
   * Restart the replication stream to the specified peer.
   *
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void enableReplicationPeer(final String peerId) throws IOException {
  }

  /**
   * Stop the replication stream to the specified peer.
   *
   * @param peerId a short name that identifies the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void disableReplicationPeer(final String peerId) throws IOException {
  }

  /**
   * Returns the configured ReplicationPeerConfig for the specified peer.
   *
   * @param peerId a short name that identifies the peer
   * @return ReplicationPeerConfig for the peer
   * @throws IOException if a remote or network exception occurs
   */
  default ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
    return new ReplicationPeerConfig();
  }

  /**
   * Update the peerConfig for the specified peer.
   *
   * @param peerId a short name that identifies the peer
   * @param peerConfig new config for the peer
   * @throws IOException if a remote or network exception occurs
   */
  default void updateReplicationPeerConfig(final String peerId,
      final ReplicationPeerConfig peerConfig) throws IOException {
  }
  /**
   * Append the replicable table-cf config of the specified peer.
   *
   * @param id a short name that identifies the cluster
   * @param tableCfs A map from tableName to column family names
   * @throws ReplicationException if a replication error occurs
   * @throws IOException if a remote or network exception occurs
   */
  default void appendReplicationPeerTableCFs(String id,
      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
      IOException {
  }

  /**
   * Remove some table-cfs from config of the specified peer.
   *
   * @param id a short name that identifies the cluster
   * @param tableCfs A map from tableName to column family names
   * @throws ReplicationException if a replication error occurs
   * @throws IOException if a remote or network exception occurs
   */
  default void removeReplicationPeerTableCFs(String id,
      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
      IOException {
  }
  /**
   * Return a list of replication peers.
   *
   * @return a list of replication peers description
   * @throws IOException if a remote or network exception occurs
   */
  default List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
    return new ArrayList<>();
  }

  /**
   * Return a list of replication peers.
   *
   * @param regex The regular expression to match peer id
   * @return a list of replication peers description
   * @throws IOException if a remote or network exception occurs
   */
  default List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
    return new ArrayList<>();
  }

  /**
   * Return a list of replication peers.
   *
   * @param pattern The compiled regular expression to match peer id
   * @return a list of replication peers description
   * @throws IOException if a remote or network exception occurs
   */
  default List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
      throws IOException {
    return new ArrayList<>();
  }
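  /*
   * Illustrative caller-side sketch (an assumption, not part of this interface): register a
   * replication peer pointing at another cluster's ZooKeeper quorum and list the configured
   * peers. The cluster key and peer id are placeholders. Assumes an Admin named "admin".
   *
   *   ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
   *   peerConfig.setClusterKey("zk-host:2181:/hbase");
   *   admin.addReplicationPeer("peer1", peerConfig);
   *   for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
   *     System.out.println(peer.getPeerId());
   *   }
   */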
  /**
   * Mark a region server as draining to prevent additional regions from getting assigned to it.
   *
   * @param servers List of region servers to drain.
   * @throws IOException if a remote or network exception occurs
   */
  void drainRegionServers(List<ServerName> servers) throws IOException;

  /**
   * List region servers marked as draining to not get additional regions assigned to them.
   *
   * @return List of draining region servers.
   * @throws IOException if a remote or network exception occurs
   */
  List<ServerName> listDrainingRegionServers() throws IOException;

  /**
   * Remove drain from a region server to allow additional regions assignments.
   *
   * @param servers List of region servers to remove drain from.
   * @throws IOException if a remote or network exception occurs
   */
  void removeDrainFromRegionServers(List<ServerName> servers) throws IOException;
  /**
   * Find all table and column families that are replicated from this cluster.
   *
   * @return the replicated table-cfs list of this cluster.
   * @throws IOException if a remote or network exception occurs
   */
  List<TableCFs> listReplicatedTableCFs() throws IOException;

  /**
   * Enable a table's replication switch.
   *
   * @param tableName name of the table
   * @throws IOException if a remote or network exception occurs
   */
  void enableTableReplication(final TableName tableName) throws IOException;

  /**
   * Disable a table's replication switch.
   *
   * @param tableName name of the table
   * @throws IOException if a remote or network exception occurs
   */
  void disableTableReplication(final TableName tableName) throws IOException;
  /**
   * Clear compacting queues on a regionserver.
   *
   * @param sn the region server name
   * @param queues the set of queue names
   * @throws IOException if a remote or network exception occurs
   * @throws InterruptedException if the thread is interrupted while waiting
   */
  void clearCompactionQueues(final ServerName sn, final Set<String> queues)
      throws IOException, InterruptedException;