/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * HTableDescriptor contains the details about an HBase table such as the descriptors of
 * all the column families, whether the table is a catalog table, <code> -ROOT- </code> or
 * <code> hbase:meta </code>, whether the table is read only, the maximum size of the memstore,
 * when the region split should occur, coprocessors associated with it etc...
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements Comparable<HTableDescriptor> {
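
  // Illustrative usage (editorial note, not part of the original source; table
  // and family names here are hypothetical):
  //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
  //   htd.addFamily(new HColumnDescriptor("cf"));
  //   htd.setDurability(Durability.ASYNC_WAL);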
  private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);

  private TableName name = null;

  /**
   * A map which holds the metadata information of the table. This metadata
   * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
   * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
   */
  private final Map<Bytes, Bytes> values = new HashMap<>();

  /**
   * A map which holds the configuration specific to the table.
   * The keys of the map have the same names as config keys and override the defaults with
   * table-specific settings. Example usage may be for compactions, etc.
   */
  private final Map<String, String> configuration = new HashMap<>();

  public static final String SPLIT_POLICY = "SPLIT_POLICY";

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes the maximum size of the store file after which
   * a region split occurs.
   *
   * @see #getMaxFileSize()
   */
  public static final String MAX_FILESIZE = "MAX_FILESIZE";
  private static final Bytes MAX_FILESIZE_KEY =
      new Bytes(Bytes.toBytes(MAX_FILESIZE));

  public static final String OWNER = "OWNER";
  public static final Bytes OWNER_KEY =
      new Bytes(Bytes.toBytes(OWNER));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is read only.
   *
   * @see #isReadOnly()
   */
  public static final String READONLY = "READONLY";
  private static final Bytes READONLY_KEY =
      new Bytes(Bytes.toBytes(READONLY));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the table is compaction enabled.
   *
   * @see #isCompactionEnabled()
   */
  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
  private static final Bytes COMPACTION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(COMPACTION_ENABLED));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which represents the maximum size of the memstore after which
   * its contents are flushed onto the disk.
   *
   * @see #getMemStoreFlushSize()
   */
  public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
  private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
      new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));

  public static final String FLUSH_POLICY = "FLUSH_POLICY";

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if the table is a -ROOT- region or not.
   *
   * @see #isRootRegion()
   */
  public static final String IS_ROOT = "IS_ROOT";
  private static final Bytes IS_ROOT_KEY =
      new Bytes(Bytes.toBytes(IS_ROOT));

  /**
   * <em>INTERNAL</em> Used by rest interface to access this metadata
   * attribute which denotes if it is a catalog table, either
   * <code> hbase:meta </code> or <code> -ROOT- </code>.
   *
   * @see #isMetaRegion()
   */
  public static final String IS_META = "IS_META";
  private static final Bytes IS_META_KEY =
      new Bytes(Bytes.toBytes(IS_META));

  /**
   * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
   * attribute which denotes if the deferred log flush option is enabled.
   * @deprecated Use {@link #DURABILITY} instead.
   */
  @Deprecated
  public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
  @Deprecated
  private static final Bytes DEFERRED_LOG_FLUSH_KEY =
      new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));

  /**
   * <em>INTERNAL</em> {@link Durability} setting for the table.
   */
  public static final String DURABILITY = "DURABILITY";
  private static final Bytes DURABILITY_KEY =
      new Bytes(Bytes.toBytes("DURABILITY"));

  /**
   * <em>INTERNAL</em> number of region replicas for the table.
   */
  public static final String REGION_REPLICATION = "REGION_REPLICATION";
  private static final Bytes REGION_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_REPLICATION));

  /**
   * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
   * for read-replicas (CONSISTENCY => TIMELINE).
   */
  public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
  private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
      new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));

  /**
   * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
   * attribute which denotes if the table should be treated by the region normalizer.
   *
   * @see #isNormalizationEnabled()
   */
  public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
  private static final Bytes NORMALIZATION_ENABLED_KEY =
      new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED));

  /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
  private static final Durability DEFAULT_DURABILITY = Durability.USE_DEFAULT;

  public static final String PRIORITY = "PRIORITY";
  private static final Bytes PRIORITY_KEY =
      new Bytes(Bytes.toBytes(PRIORITY));

  /** Relative priority of the table used for rpc scheduling */
  private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;

  /*
   * The below are ugly but better than creating them each time till we
   * replace booleans being saved as Strings with plain booleans. Need a
   * migration script to do this. TODO.
   */
  private static final Bytes FALSE =
      new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
  private static final Bytes TRUE =
      new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
  private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;

  /**
   * Constant that denotes whether the table is READONLY by default and is false
   */
  public static final boolean DEFAULT_READONLY = false;

  /**
   * Constant that denotes whether the table is compaction enabled by default
   */
  public static final boolean DEFAULT_COMPACTION_ENABLED = true;

  /**
   * Constant that denotes whether the table is normalized by default.
   */
  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;

  /**
   * Constant that denotes the maximum default size of the memstore after which
   * the contents are flushed to the store files
   */
  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;

  public static final int DEFAULT_REGION_REPLICATION = 1;

  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;

  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();
  static {
    DEFAULT_VALUES.put(MAX_FILESIZE,
        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
    DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABILITY.name()); //use the enum name
    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
    DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
    for (String s : DEFAULT_VALUES.keySet()) {
      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
    }
    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
    RESERVED_KEYWORDS.add(IS_META_KEY);
  }
  /**
   * Cache of whether this is a meta table or not.
   */
  private volatile Boolean meta = null;

  /**
   * Cache of whether this is root table or not.
   */
  private volatile Boolean root = null;

  /**
   * Durability setting for the table
   */
  private Durability durability = null;

  /**
   * Maps column family name to the respective HColumnDescriptors
   */
  private final Map<byte [], HColumnDescriptor> families =
      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
  /**
   * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  @InterfaceAudience.Private
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
    setName(name);
    for (HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
  }

  /**
   * <em> INTERNAL </em> Private constructor used internally creating table descriptors for
   * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
   */
  protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
      Map<Bytes, Bytes> values) {
    setName(name);
    for (HColumnDescriptor descriptor : families) {
      this.families.put(descriptor.getName(), descriptor);
    }
    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
      setValue(entry.getKey(), entry.getValue());
    }
  }

  /**
   * Default constructor which constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
   *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
   *             Used by Writables and Writables are going away.
   */
  @Deprecated
  protected HTableDescriptor() {
    super();
  }

  /**
   * Construct a table descriptor specifying a TableName object
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
   */
  public HTableDescriptor(final TableName name) {
    super();
    setName(name);
  }

  /**
   * Construct a table descriptor specifying a byte array table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final byte[] name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor specifying a String table name
   * @param name Table name.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 (HBASE-174) HBASE: Un-openable tablename bug</a>
   */
  @Deprecated
  public HTableDescriptor(final String name) {
    this(TableName.valueOf(name));
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    this(desc.name, desc);
  }
  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter
   * but using a different table name.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param name Table name.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
    super();
    setName(name);
    setMetaFlags(this.name);
    for (HColumnDescriptor c : desc.families.values()) {
      this.families.put(c.getName(), new HColumnDescriptor(c));
    }
    for (Map.Entry<Bytes, Bytes> e : desc.values.entrySet()) {
      setValue(e.getKey(), e.getValue());
    }
    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
      this.configuration.put(e.getKey(), e.getValue());
    }
  }
  /*
   * Set meta flags on this table.
   * IS_ROOT_KEY is set if it is a -ROOT- table.
   * IS_META_KEY is set either if it is a -ROOT- or a hbase:meta table.
   * Called by constructors.
   */
  private void setMetaFlags(final TableName name) {
    setMetaRegion(isRootRegion() ||
        name.equals(TableName.META_TABLE_NAME));
  }

  /**
   * Check if the descriptor represents a <code> -ROOT- </code> region.
   *
   * @return true if this is a <code> -ROOT- </code> region
   */
  public boolean isRootRegion() {
    if (this.root == null) {
      this.root = isSomething(IS_ROOT_KEY, false) ? Boolean.TRUE : Boolean.FALSE;
    }
    return this.root.booleanValue();
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> region. This is used internally by the
   * HTableDescriptor constructors
   *
   * @param isRoot true if this is the <code> -ROOT- </code> region
   */
  protected void setRootRegion(boolean isRoot) {
    // TODO: Make the value a boolean rather than String of boolean.
    setValue(IS_ROOT_KEY, isRoot ? TRUE : FALSE);
  }

  /**
   * Checks if this table is <code> hbase:meta </code> region.
   *
   * @return true if this table is <code> hbase:meta </code> region
   */
  public boolean isMetaRegion() {
    if (this.meta == null) {
      this.meta = calculateIsMetaRegion();
    }
    return this.meta.booleanValue();
  }

  private synchronized Boolean calculateIsMetaRegion() {
    byte [] value = getValue(IS_META_KEY);
    return (value != null) ? Boolean.valueOf(Bytes.toString(value)) : Boolean.FALSE;
  }

  private boolean isSomething(final Bytes key,
      final boolean valueIfNull) {
    byte [] value = getValue(key);
    if (value != null) {
      return Boolean.valueOf(Bytes.toString(value));
    }
    return valueIfNull;
  }

  /**
   * <em> INTERNAL </em> Used to denote if the current table represents
   * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
   * internally by the HTableDescriptor constructors
   *
   * @param isMeta true if it is either <code> -ROOT- </code> or
   * <code> hbase:meta </code> region
   */
  protected void setMetaRegion(boolean isMeta) {
    setValue(IS_META_KEY, isMeta ? TRUE : FALSE);
  }

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  public boolean isMetaTable() {
    return isMetaRegion() && !isRootRegion();
  }
  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   */
  public byte[] getValue(byte[] key) {
    return getValue(new Bytes(key));
  }

  private byte[] getValue(final Bytes key) {
    Bytes ibw = values.get(key);
    if (ibw == null) {
      return null;
    }
    return ibw.get();
  }

  /**
   * Getter for accessing the metadata associated with the key
   *
   * @param key The key.
   * @return The value.
   */
  public String getValue(String key) {
    byte[] value = getValue(Bytes.toBytes(key));
    if (value == null) {
      return null;
    }
    return Bytes.toString(value);
  }

  /**
   * Getter for fetching an unmodifiable {@link #values} map.
   *
   * @return unmodifiable map {@link #values}.
   */
  public Map<Bytes, Bytes> getValues() {
    // shallow pointer copy
    return Collections.unmodifiableMap(values);
  }
  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(byte[] key, byte[] value) {
    setValue(new Bytes(key), new Bytes(value));
    return this;
  }

  /*
   * @param key The key.
   * @param value The value.
   */
  private HTableDescriptor setValue(final Bytes key,
      final String value) {
    setValue(key, new Bytes(Bytes.toBytes(value)));
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(final Bytes key, final Bytes value) {
    if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
      boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
      LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
          "use " + DURABILITY + " instead");
      setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABILITY);
      return this;
    }
    values.put(key, value);
    return this;
  }

  /**
   * Setter for storing metadata as a (key, value) pair in {@link #values} map
   *
   * @param key The key.
   * @param value The value.
   */
  public HTableDescriptor setValue(String key, String value) {
    if (value == null) {
      remove(key);
    } else {
      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
    }
    return this;
  }
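
  // Illustrative note (editorial, not part of the original source): metadata
  // set through the String overload round-trips through the Bytes-keyed values
  // map, so for a hypothetical key "MY_ATTR":
  //   htd.setValue("MY_ATTR", "my-value");
  //   assert "my-value".equals(htd.getValue("MY_ATTR"));
  // Passing a null value removes the key instead of storing it.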
  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final String key) {
    remove(new Bytes(Bytes.toBytes(key)));
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(Bytes key) {
    values.remove(key);
  }

  /**
   * Remove metadata represented by the key from the {@link #values} map
   *
   * @param key Key whose key and value we're to remove from HTableDescriptor
   * parameters.
   */
  public void remove(final byte [] key) {
    remove(new Bytes(key));
  }
  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is
   * set then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only
   */
  public boolean isReadOnly() {
    return isSomething(READONLY_KEY, DEFAULT_READONLY);
  }

  /**
   * Setting the table as read only sets all the columns in the table as read
   * only. By default all tables are modifiable, but if the readOnly flag is
   * set to true then the contents of the table can only be read but not modified.
   *
   * @param readOnly True if all of the columns in the table should be read only.
   */
  public HTableDescriptor setReadOnly(final boolean readOnly) {
    return setValue(READONLY_KEY, readOnly ? TRUE : FALSE);
  }

  /**
   * Check if the compaction enable flag of the table is true. If flag is
   * false then no minor/major compactions will be performed.
   *
   * @return true if table compaction is enabled
   */
  public boolean isCompactionEnabled() {
    return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
  }

  /**
   * Setting the table compaction enable flag.
   *
   * @param isEnable True if enable compaction.
   */
  public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
    setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Check if normalization enable flag of the table is true. If flag is
   * false then the region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  public boolean isNormalizationEnabled() {
    return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
  }

  /**
   * Setting the table normalization enable flag.
   *
   * @param isEnable True if enable normalization.
   */
  public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
    setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
    return this;
  }

  /**
   * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
   * @param durability enum value
   */
  public HTableDescriptor setDurability(Durability durability) {
    this.durability = durability;
    setValue(DURABILITY_KEY, durability.name());
    return this;
  }
  /**
   * Returns the durability setting for the table.
   * @return durability setting for the table.
   */
  public Durability getDurability() {
    if (this.durability == null) {
      byte[] durabilityValue = getValue(DURABILITY_KEY);
      if (durabilityValue == null) {
        this.durability = DEFAULT_DURABILITY;
      } else {
        try {
          this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
        } catch (IllegalArgumentException ex) {
          LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
              + " is not known. Durability:" + Bytes.toString(durabilityValue));
          this.durability = DEFAULT_DURABILITY;
        }
      }
    }
    return this.durability;
  }
  /**
   * Get the name of the table
   *
   * @return TableName
   */
  public TableName getTableName() {
    return name;
  }

  /**
   * Get the name of the table as a byte array.
   *
   * @return name of table
   * @deprecated Use {@link #getTableName()} instead
   */
  @Deprecated
  public byte[] getName() {
    return name.getName();
  }

  /**
   * Get the name of the table as a String
   *
   * @return name of table as a String
   */
  public String getNameAsString() {
    return name.getNameAsString();
  }

  /**
   * This sets the class associated with the region split policy which
   * determines when a region split should occur. The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   * @param clazz the class name
   */
  public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
    setValue(SPLIT_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur. The class used by
   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
   *
   * @return the class name of the region split policy for this table.
   * If this returns null, the default split policy is used.
   */
  public String getRegionSplitPolicyClassName() {
    return getValue(SPLIT_POLICY);
  }

  /**
   * Set the name of the table.
   *
   * @param name name of table
   */
  @Deprecated
  public HTableDescriptor setName(byte[] name) {
    setName(TableName.valueOf(name));
    return this;
  }

  @Deprecated
  public HTableDescriptor setName(TableName name) {
    this.name = name;
    setMetaFlags(this.name);
    return this;
  }
  /**
   * Returns the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   *
   * @see #setMaxFileSize(long)
   */
  public long getMaxFileSize() {
    byte [] value = getValue(MAX_FILESIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Sets the maximum size up to which a region can grow, after which a region
   * split is triggered. The region size is represented by the size of the biggest
   * store file in that region, i.e. if the biggest store file grows beyond the
   * maxFileSize, then the region split is triggered. This defaults to
   * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
   * <p>
   * This is not an absolute value and might vary. Assume that a single row exceeds
   * the maxFileSize then the storeFileSize will be greater than maxFileSize since
   * a single row cannot be split across multiple regions.
   * </p>
   *
   * @param maxFileSize The maximum file size that a store file can grow to
   * before a split is triggered.
   */
  public HTableDescriptor setMaxFileSize(long maxFileSize) {
    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    return this;
  }

  /**
   * Returns the size of the memstore after which a flush to filesystem is triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   *
   * @see #setMemStoreFlushSize(long)
   */
  public long getMemStoreFlushSize() {
    byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
    if (value != null) {
      return Long.parseLong(Bytes.toString(value));
    }
    return -1;
  }

  /**
   * Represents the maximum size of the memstore after which the contents of the
   * memstore are flushed to the filesystem. This defaults to a size of 128 MB
   * ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
   *
   * @param memstoreFlushSize memory cache flush size for each hregion
   */
  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    return this;
  }
  /**
   * This sets the class associated with the flush policy which determines which stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @param clazz the class name
   */
  public HTableDescriptor setFlushPolicyClassName(String clazz) {
    setValue(FLUSH_POLICY, clazz);
    return this;
  }

  /**
   * This gets the class associated with the flush policy which determines which stores
   * need to be flushed when flushing a region. The class used by default is defined in
   * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
   * @return the class name of the flush policy for this table. If this returns null, the default
   * flush policy is used.
   */
  public String getFlushPolicyClassName() {
    return getValue(FLUSH_POLICY);
  }
  /**
   * Adds a column family.
   * For the updating purpose please use {@link #modifyFamily(HColumnDescriptor)} instead.
   * @param family HColumnDescriptor of family to add.
   */
  public HTableDescriptor addFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (hasFamily(family.getName())) {
      throw new IllegalArgumentException("Family '" +
          family.getNameAsString() + "' already exists so cannot be added");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Modifies the existing column family.
   * @param family HColumnDescriptor of family to update
   * @return this (for chained invocation)
   */
  public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
    if (family.getName() == null || family.getName().length <= 0) {
      throw new IllegalArgumentException("Family name cannot be null or empty");
    }
    if (!hasFamily(family.getName())) {
      throw new IllegalArgumentException("Column family '" + family.getNameAsString()
          + "' does not exist");
    }
    this.families.put(family.getName(), family);
    return this;
  }

  /**
   * Checks to see if this table contains the given column family
   * @param familyName Family name or column name.
   * @return true if the table contains the specified family name
   */
  public boolean hasFamily(final byte [] familyName) {
    return families.containsKey(familyName);
  }
  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors.
   * @see #getNameAsString()
   */
  @Override
  public String toString() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(true));
    for (HColumnDescriptor f : families.values()) {
      s.append(", ").append(f);
    }
    return s.toString();
  }
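
  // Illustrative output shape (editorial note; hypothetical table 't1' with one
  // family 'cf'):
  //   't1', {TABLE_ATTRIBUTES => {...}}, {NAME => 'cf', ...}
  // The exact attribute list depends on which values differ from the defaults.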
  /**
   * @return Name of this table and then a map of all of the column family
   * descriptors (with only the non-default column family attributes)
   */
  public String toStringCustomizedValues() {
    StringBuilder s = new StringBuilder();
    s.append('\'').append(Bytes.toString(name.getName())).append('\'');
    s.append(getValues(false));
    for (HColumnDescriptor hcd : families.values()) {
      s.append(", ").append(hcd.toStringCustomizedValues());
    }
    return s.toString();
  }

  /**
   * @return map of all table attributes formatted into string.
   */
  public String toStringTableAttributes() {
    return getValues(true).toString();
  }
  private StringBuilder getValues(boolean printDefaults) {
    StringBuilder s = new StringBuilder();

    // step 1: set partitioning and pruning
    Set<Bytes> reservedKeys = new TreeSet<>();
    Set<Bytes> userKeys = new TreeSet<>();
    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
      if (entry.getKey() == null || entry.getKey().get() == null) continue;
      String key = Bytes.toString(entry.getKey().get());
      // in this section, print out reserved keywords + coprocessor info
      if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
        userKeys.add(entry.getKey());
        continue;
      }
      // only print out IS_ROOT/IS_META if true
      String value = Bytes.toString(entry.getValue().get());
      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
        if (Boolean.valueOf(value) == false) continue;
      }
      // see if a reserved key is a default value. may not want to print it out
      if (printDefaults
          || !DEFAULT_VALUES.containsKey(key)
          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
        reservedKeys.add(entry.getKey());
      }
    }

    // early exit optimization
    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
    if (!hasAttributes && configuration.isEmpty()) return s;

    s.append(", {");
    // step 2: printing attributes
    if (hasAttributes) {
      s.append("TABLE_ATTRIBUTES => {");

      // print all reserved keys first
      boolean printCommaForAttr = false;
      for (Bytes k : reservedKeys) {
        String key = Bytes.toString(k.get());
        String value = Bytes.toStringBinary(values.get(k).get());
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(key);
        s.append(" => ");
        s.append('\'').append(value).append('\'');
      }

      if (!userKeys.isEmpty()) {
        // print all non-reserved, advanced config keys as a separate subset
        if (printCommaForAttr) s.append(", ");
        printCommaForAttr = true;
        s.append(HConstants.METADATA).append(" => ");
        s.append('{');
        boolean printCommaForCfg = false;
        for (Bytes k : userKeys) {
          String key = Bytes.toString(k.get());
          String value = Bytes.toStringBinary(values.get(k).get());
          if (printCommaForCfg) s.append(", ");
          printCommaForCfg = true;
          s.append('\'').append(key).append('\'');
          s.append(" => ");
          s.append('\'').append(value).append('\'');
        }
        s.append('}');
      }
      s.append('}');
    }

    // step 3: printing all configuration:
    if (!configuration.isEmpty()) {
      if (hasAttributes) {
        s.append(", ");
      }
      s.append(HConstants.CONFIGURATION).append(" => ");
      s.append('{');
      boolean printCommaForConfig = false;
      for (Map.Entry<String, String> e : configuration.entrySet()) {
        if (printCommaForConfig) s.append(", ");
        printCommaForConfig = true;
        s.append('\'').append(e.getKey()).append('\'');
        s.append(" => ");
        s.append('\'').append(e.getValue()).append('\'');
      }
      s.append('}');
    }
    s.append("}"); // end METHOD
    return s;
  }
  /**
   * Compare the contents of the descriptor with another one passed as a parameter.
   * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
   * contents of the descriptors are compared.
   *
   * @return true if the contents of the two descriptors exactly match
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (!(obj instanceof HTableDescriptor)) {
      return false;
    }
    return compareTo((HTableDescriptor) obj) == 0;
  }
  /**
   * Detects whether replication has been already enabled on any of the column families of this
   * table descriptor.
   * @return true if any of the column families has replication enabled.
   */
  public boolean isReplicationEnabled() {
    // Go through each Column-Family descriptor and check if the
    // Replication has been enabled already.
    // Return 'true' if replication has been enabled on any CF,
    // otherwise return 'false'.

    boolean result = false;
    Iterator<HColumnDescriptor> it = this.families.values().iterator();

    while (it.hasNext()) {
      HColumnDescriptor tempHcd = it.next();
      if (tempHcd.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
        result = true;
        break;
      }
    }

    return result;
  }
  /**
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {
    int result = this.name.hashCode();
    if (this.families.size() > 0) {
      for (HColumnDescriptor e : this.families.values()) {
        result ^= e.hashCode();
      }
    }
    result ^= values.hashCode();
    result ^= configuration.hashCode();
    return result;
  }
  /**
   * Compares the descriptor with another descriptor which is passed as a parameter.
   * This compares the content of the two descriptors and not the reference.
   *
   * @return 0 if the contents of the descriptors are exactly matching,
   *         1 if there is a mismatch in the contents
   */
  @Override
  public int compareTo(final HTableDescriptor other) {
    int result = this.name.compareTo(other.name);
    if (result == 0) {
      result = families.size() - other.families.size();
    }
    if (result == 0 && families.size() != other.families.size()) {
      result = Integer.valueOf(families.size()).compareTo(
          Integer.valueOf(other.families.size()));
    }
    if (result == 0) {
      for (Iterator<HColumnDescriptor> it = families.values().iterator(),
          it2 = other.families.values().iterator(); it.hasNext(); ) {
        result = it.next().compareTo(it2.next());
        if (result != 0) {
          break;
        }
      }
    }
    if (result == 0) {
      // punt on comparison for ordering, just calculate difference
      result = this.values.hashCode() - other.values.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    if (result == 0) {
      result = this.configuration.hashCode() - other.configuration.hashCode();
      if (result < 0)
        result = -1;
      else if (result > 0)
        result = 1;
    }
    return result;
  }
  /**
   * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
   * of all the column families of the table.
   *
   * @return Immutable collection of {@link HColumnDescriptor} of all the
   * column families.
   */
  public Collection<HColumnDescriptor> getFamilies() {
    return Collections.unmodifiableCollection(this.families.values());
  }

  /**
   * Return true if there is at least one column family whose replication scope is serial.
   */
  public boolean hasSerialReplicationScope() {
    for (HColumnDescriptor column : getFamilies()) {
      if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
        return true;
      }
    }
    return false;
  }

  /**
   * Returns the configured replicas per region
   */
  public int getRegionReplication() {
    return getIntValue(REGION_REPLICATION_KEY, DEFAULT_REGION_REPLICATION);
  }
  private int getIntValue(Bytes key, int defaultVal) {
    byte[] val = getValue(key);
    if (val == null || val.length == 0) {
      return defaultVal;
    }
    return Integer.parseInt(Bytes.toString(val));
  }

  /**
   * Sets the number of replicas per region.
   * @param regionReplication the replication factor per region
   */
  public HTableDescriptor setRegionReplication(int regionReplication) {
    setValue(REGION_REPLICATION_KEY,
        new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
    return this;
  }
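
  // Illustrative usage (editorial note, not part of the original source): a
  // table with three replicas per region, which clients can then read with
  // TIMELINE consistency:
  //   htd.setRegionReplication(3);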
  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  public boolean hasRegionMemstoreReplication() {
    return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
  }

  /**
   * Enable or Disable the memstore replication from the primary region to the replicas.
   * The replication will be used only for meta operations (e.g. flush, compaction, ...)
   *
   * @param memstoreReplication true if the new data written to the primary region
   *                            should be replicated.
   *                            false if the secondaries can tolerate having new
   *                            data only when the primary flushes the memstore.
   */
  public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
    setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
    // If the memstore replication is setup, we do not have to wait for observing a flush event
    // from primary before starting to serve reads, because gaps from replication is not applicable
    setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
        Boolean.toString(memstoreReplication));
    return this;
  }

  public HTableDescriptor setPriority(int priority) {
    setValue(PRIORITY_KEY, Integer.toString(priority));
    return this;
  }

  public int getPriority() {
    return getIntValue(PRIORITY_KEY, DEFAULT_PRIORITY);
  }
  /**
   * Returns all the column family names of the current table. The map of
   * HTableDescriptor contains mapping of family name to HColumnDescriptors.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  public Set<byte[]> getFamiliesKeys() {
    return Collections.unmodifiableSet(this.families.keySet());
  }

  /**
   * Returns the count of the column families of the table.
   *
   * @return Count of column families of the table
   */
  public int getColumnFamilyCount() {
    return families.size();
  }

  /**
   * Returns an array of all the {@link HColumnDescriptor} of the column families
   * of the table.
   *
   * @return Array of all the HColumnDescriptors of the current table
   *
   * @see #getFamilies()
   */
  public HColumnDescriptor[] getColumnFamilies() {
    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
  }

  /**
   * Returns the HColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param column Column family name
   * @return Column descriptor for the passed family name or the family on
   * passed in column.
   */
  public HColumnDescriptor getFamily(final byte [] column) {
    return this.families.get(column);
  }

  /**
   * Removes the HColumnDescriptor with name specified by the parameter column
   * from the table descriptor
   *
   * @param column Name of the column family to be removed.
   * @return Column descriptor for the passed family name or the family on
   * passed in column.
   */
  public HColumnDescriptor removeFamily(final byte [] column) {
    return this.families.remove(column);
  }
  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param className Full class name.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className) throws IOException {
    addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
    return this;
  }

  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param jarFilePath Path of the jar file. If it's null, the class will be
   * loaded from default classloader.
   * @param className Full class name.
   * @param priority Priority
   * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
   * @throws IOException
   */
  public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
      int priority, final Map<String, String> kvs)
      throws IOException {
    checkHasCoprocessor(className);

    // Validate parameter kvs and then add key/values to kvString.
    StringBuilder kvString = new StringBuilder();
    if (kvs != null) {
      for (Map.Entry<String, String> e : kvs.entrySet()) {
        if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
          throw new IOException("Illegal parameter key = " + e.getKey());
        }
        if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
          throw new IOException("Illegal parameter (" + e.getKey() +
              ") value = " + e.getValue());
        }
        if (kvString.length() != 0) {
          kvString.append(',');
        }
        kvString.append(e.getKey());
        kvString.append('=');
        kvString.append(e.getValue());
      }
    }

    String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
        "|" + className + "|" + Integer.toString(priority) + "|" +
        kvString.toString();
    return addCoprocessorToMap(value);
  }
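
  // Illustrative note (editorial, not part of the original source): the value
  // composed above follows the "path|class|priority|key=value,..." spec format,
  // e.g. (hypothetical jar and class):
  //   hdfs:///foo.jar|com.example.FooRegionObserver|1001|arg1=1,arg2=2
  // where the jar path and the key-value arguments may be empty.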
  /**
   * Add a table coprocessor to this table. The coprocessor
   * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
   * or Endpoint.
   * It won't check if the class can be loaded or not.
   * Whether a coprocessor is loadable or not will be determined when
   * a region is opened.
   * @param specStr The Coprocessor specification all in one String formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @throws IOException
   */
  public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
    String className = getCoprocessorClassNameFromSpecStr(specStr);
    if (className == null) {
      throw new IllegalArgumentException("Format does not match " +
          HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
    }
    checkHasCoprocessor(className);
    return addCoprocessorToMap(specStr);
  }

  private void checkHasCoprocessor(final String className) throws IOException {
    if (hasCoprocessor(className)) {
      throw new IOException("Coprocessor " + className + " already exists.");
    }
  }

  /**
   * Add coprocessor to values Map
   * @param specStr The Coprocessor specification all in one String formatted so it matches
   * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Returns <code>this</code>
   */
  private HTableDescriptor addCoprocessorToMap(final String specStr) {
    if (specStr == null) return this;
    // generate a coprocessor key
    int maxCoprocessorNumber = 0;
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
    }
    maxCoprocessorNumber++;
    String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
    this.values.put(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    return this;
  }
  /**
   * Check if the table has an attached co-processor represented by the name className
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true if the table has a co-processor className
   */
  public boolean hasCoprocessor(String classNameToMatch) {
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e :
        this.values.entrySet()) {
      keyMatcher =
          HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
              Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      if (className.equals(classNameToMatch.trim())) {
        return true;
      }
    }
    return false;
  }

  /**
   * Return the list of attached co-processor represented by their name className
   *
   * @return The list of co-processors classNames
   */
  public List<String> getCoprocessors() {
    List<String> result = new ArrayList<>(this.values.entrySet().size());
    Matcher keyMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      String className = getCoprocessorClassNameFromSpecStr(Bytes.toString(e.getValue().get()));
      if (className == null) continue;
      result.add(className); // classname is the 2nd field
    }
    return result;
  }

  /**
   * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
   * @return Class parsed from passed in <code>spec</code> or null if no match or classpath found
   */
  private static String getCoprocessorClassNameFromSpecStr(final String spec) {
    Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    // Classname is the 2nd field
    return matcher != null && matcher.matches() ? matcher.group(2).trim() : null;
  }
  /**
   * Remove a coprocessor from those set on the table
   * @param className Class name of the co-processor
   */
  public void removeCoprocessor(String className) {
    Bytes match = null;
    Matcher keyMatcher;
    Matcher valueMatcher;
    for (Map.Entry<Bytes, Bytes> e : this.values
        .entrySet()) {
      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
      if (!keyMatcher.matches()) {
        continue;
      }
      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
      if (!valueMatcher.matches()) {
        continue;
      }
      // get className and compare
      String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
      // remove the CP if it is present
      if (clazz.equals(className.trim())) {
        match = e.getKey();
        break;
      }
    }
    // if we found a match, remove it
    if (match != null) {
      remove(match);
    }
  }
  /**
   * Returns the {@link Path} object representing the table directory under
   * path rootdir.
   *
   * Deprecated use FSUtils.getTableDir() instead.
   *
   * @param rootdir qualified path of HBase root directory
   * @param tableName name of table
   * @return {@link Path} for table
   */
  @Deprecated
  public static Path getTableDir(Path rootdir, final byte [] tableName) {
    //This is bad I had to mirror code from FSUtils.getTableDir since
    //there is no module dependency between hbase-client and hbase-server
    TableName name = TableName.valueOf(tableName);
    return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
        new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
  }
  public final static String NAMESPACE_FAMILY_INFO = "info";
  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");

  /** Table descriptor for namespace table */
  public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
      TableName.NAMESPACE_TABLE_NAME,
      new HColumnDescriptor[] {
          new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
              // Ten is arbitrary number. Keep versions to help debugging.
              .setMaxVersions(10)
              .setInMemory(true)
              .setBlocksize(8 * 1024)
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
              // Enable cache of data blocks in L1 if more than one caching tier deployed:
              // e.g. if using CombinedBlockCache (BucketCache).
              .setCacheDataInL1(true)
      });
  @Deprecated
  public HTableDescriptor setOwner(User owner) {
    return setOwnerString(owner != null ? owner.getShortName() : null);
  }

  // used by admin.rb:alter(table_name,*args) to update owner.
  @Deprecated
  public HTableDescriptor setOwnerString(String ownerString) {
    if (ownerString != null) {
      setValue(OWNER_KEY, ownerString);
    } else {
      remove(OWNER_KEY);
    }
    return this;
  }

  @Deprecated
  public String getOwnerString() {
    if (getValue(OWNER_KEY) != null) {
      return Bytes.toString(getValue(OWNER_KEY));
    }
    // Note that every table should have an owner (i.e. should have OWNER_KEY set).
    // hbase:meta and -ROOT- should return system user as owner, not null (see
    // MasterFileSystem.java:bootstrap()).
    return null;
  }
  /**
   * @return This instance serialized with pb with pb magic prefix
   * @see #parseFrom(byte[])
   */
  public byte[] toByteArray() {
    return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
  }

  /**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
   */
  public static HTableDescriptor parseFrom(final byte [] bytes)
      throws DeserializationException, IOException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
      throw new DeserializationException("Expected PB encoded HTableDescriptor");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    TableSchema.Builder builder = TableSchema.newBuilder();
    TableSchema ts;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      ts = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return ProtobufUtil.convertToHTableDesc(ts);
  }
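
  // Illustrative round-trip (editorial note, not part of the original source):
  // toByteArray() and parseFrom(byte[]) are inverses, so a descriptor can be
  // persisted and restored via its protobuf form:
  //   byte[] pb = htd.toByteArray();
  //   HTableDescriptor copy = HTableDescriptor.parseFrom(pb);
  //   assert htd.equals(copy);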
  /**
   * Getter for accessing the configuration value by key
   */
  public String getConfigurationValue(String key) {
    return configuration.get(key);
  }

  /**
   * Getter for fetching an unmodifiable {@link #configuration} map.
   */
  public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
  }

  /**
   * Setter for storing a configuration setting in {@link #configuration} map.
   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
   * @param value String value. If null, removes the setting.
   */
  public HTableDescriptor setConfiguration(String key, String value) {
    if (value == null) {
      removeConfiguration(key);
    } else {
      configuration.put(key, value);
    }
    return this;
  }
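
  // Illustrative usage (editorial note, not part of the original source): the
  // key is an ordinary site-configuration key, overridden here per table:
  //   htd.setConfiguration("hbase.hstore.compaction.min", "5");
  //   htd.setConfiguration("hbase.hstore.compaction.min", null); // removes it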
  /**
   * Remove a config setting represented by the key from the {@link #configuration} map
   */
  public void removeConfiguration(final String key) {
    configuration.remove(key);
  }
}