1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase;
20  
21  import java.io.DataInput;
22  import java.io.DataOutput;
23  import java.io.IOException;
24  import java.util.ArrayList;
25  import java.util.Collection;
26  import java.util.Collections;
27  import java.util.HashMap;
28  import java.util.HashSet;
29  import java.util.Iterator;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.Set;
33  import java.util.TreeMap;
34  import java.util.TreeSet;
35  import java.util.regex.Matcher;
36  
37  import org.apache.hadoop.hbase.util.ByteStringer;
38  import org.apache.commons.logging.Log;
39  import org.apache.commons.logging.LogFactory;
40  import org.apache.hadoop.hbase.classification.InterfaceAudience;
41  import org.apache.hadoop.hbase.classification.InterfaceStability;
42  import org.apache.hadoop.conf.Configuration;
43  import org.apache.hadoop.fs.Path;
44  import org.apache.hadoop.hbase.client.Durability;
45  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
46  import org.apache.hadoop.hbase.exceptions.DeserializationException;
47  import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
48  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
49  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
50  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
51  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
52  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
53  import org.apache.hadoop.hbase.regionserver.BloomType;
54  import org.apache.hadoop.hbase.security.User;
55  import org.apache.hadoop.hbase.util.Bytes;
56  import org.apache.hadoop.hbase.util.Writables;
57  import org.apache.hadoop.io.WritableComparable;
58  
59  /**
60   * HTableDescriptor contains the details about an HBase table, such as the descriptors of
61   * all the column families, whether the table is a catalog table (<code> -ROOT- </code> or
62   * <code> hbase:meta </code>), whether the table is read only, the maximum size of the
63   * memstore, when a region split should occur, the coprocessors associated with it, etc.
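     *
     * <p>A minimal usage sketch (illustrative only; the table name "t1", the family
     * name "cf", and the <code>admin</code> handle are assumptions, not part of this class):
     * <pre>
     * HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
     * htd.addFamily(new HColumnDescriptor("cf"));
     * admin.createTable(htd);   // e.g. an org.apache.hadoop.hbase.client.Admin
     * </pre>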
64   */
65  @InterfaceAudience.Public
66  @InterfaceStability.Evolving
67  public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
68  
69    private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
70  
71    /**
72     *  Changes prior to version 3 were not recorded here.
73     *  Version 3 adds metadata as a map where keys and values are byte[].
74     *  Version 4 adds indexes
75     *  Version 5 removed transactional pollution -- e.g. indexes
76     *  Version 6 changed metadata to BytesBytesPair in PB
77     *  Version 7 adds table-level configuration
78     */
79    private static final byte TABLE_DESCRIPTOR_VERSION = 7;
80  
81    private TableName name = null;
82  
83    /**
84     * A map which holds the metadata information of the table. This metadata
85     * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
86     * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
87     */
88    private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
89      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
90  
91    /**
92     * A map which holds the configuration specific to the table.
93     * The keys of the map have the same names as config keys and override the defaults with
94     * table-specific settings. Example usage may be for compactions, etc.
95     */
96    private final Map<String, String> configuration = new HashMap<String, String>();
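      // An illustrative sketch of a per-table configuration override (the key is a
      // standard HBase compaction setting; the table name and value are arbitrary
      // examples for this sketch):
      //
      //   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
      //   htd.setConfiguration("hbase.hstore.compaction.min", "5");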
97  
98    public static final String SPLIT_POLICY = "SPLIT_POLICY";
99  
100   /**
101    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
102    * attribute which denotes the maximum size of the store file after which
103    * a region split occurs
104    *
105    * @see #getMaxFileSize()
106    */
107   public static final String MAX_FILESIZE = "MAX_FILESIZE";
108   private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
109     new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
110 
111   public static final String OWNER = "OWNER";
112   public static final ImmutableBytesWritable OWNER_KEY =
113     new ImmutableBytesWritable(Bytes.toBytes(OWNER));
114 
115   /**
116    * <em>INTERNAL</em> Used by rest interface to access this metadata
117    * attribute which denotes if the table is Read Only
118    *
119    * @see #isReadOnly()
120    */
121   public static final String READONLY = "READONLY";
122   private static final ImmutableBytesWritable READONLY_KEY =
123     new ImmutableBytesWritable(Bytes.toBytes(READONLY));
124 
125   /**
126    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
127    * attribute which denotes if the table is compaction enabled
128    *
129    * @see #isCompactionEnabled()
130    */
131   public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
132   private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
133     new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
134 
135   /**
136    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
137    * attribute which represents the maximum size of the memstore after which
138    * its contents are flushed onto the disk
139    *
140    * @see #getMemStoreFlushSize()
141    */
142   public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
143   private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
144     new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
145 
146   public static final String FLUSH_POLICY = "FLUSH_POLICY";
147 
148   /**
149    * <em>INTERNAL</em> Used by rest interface to access this metadata
150    * attribute which denotes if the table is a -ROOT- region or not
151    *
152    * @see #isRootRegion()
153    */
154   public static final String IS_ROOT = "IS_ROOT";
155   private static final ImmutableBytesWritable IS_ROOT_KEY =
156     new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
157 
158   /**
159    * <em>INTERNAL</em> Used by rest interface to access this metadata
160    * attribute which denotes if it is a catalog table, either
161    * <code> hbase:meta </code> or <code> -ROOT- </code>
162    *
163    * @see #isMetaRegion()
164    */
165   public static final String IS_META = "IS_META";
166   private static final ImmutableBytesWritable IS_META_KEY =
167     new ImmutableBytesWritable(Bytes.toBytes(IS_META));
168 
169   /**
170    * <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
171    * attribute which denotes if the deferred log flush option is enabled.
172    * @deprecated Use {@link #DURABILITY} instead.
173    */
174   @Deprecated
175   public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
176   @Deprecated
177   private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
178     new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
179 
180   /**
181    * <em>INTERNAL</em> {@link Durability} setting for the table.
182    */
183   public static final String DURABILITY = "DURABILITY";
184   private static final ImmutableBytesWritable DURABILITY_KEY =
185       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
186 
187   /**
188    * <em>INTERNAL</em> number of region replicas for the table.
189    */
190   public static final String REGION_REPLICATION = "REGION_REPLICATION";
191   private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
192       new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
193 
194   /**
195    * <em>INTERNAL</em> flag to indicate whether or not the memstore should be replicated
196    * for read-replicas (CONSISTENCY => TIMELINE).
197    */
198   public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
199   private static final ImmutableBytesWritable REGION_MEMSTORE_REPLICATION_KEY =
200       new ImmutableBytesWritable(Bytes.toBytes(REGION_MEMSTORE_REPLICATION));
201 
202   /**
203    * <em>INTERNAL</em> Used by shell/rest interface to access this metadata
204    * attribute which denotes if the table should be treated by region normalizer.
205    *
206    * @see #isNormalizationEnabled()
207    */
208   public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED";
209   private static final ImmutableBytesWritable NORMALIZATION_ENABLED_KEY =
210     new ImmutableBytesWritable(Bytes.toBytes(NORMALIZATION_ENABLED));
211 
212   /** Default durability for HTD is USE_DEFAULT, which defers to the HBase-wide default value */
213   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
214 
215   public static final String PRIORITY = "PRIORITY";
216   private static final ImmutableBytesWritable PRIORITY_KEY =
217     new ImmutableBytesWritable(Bytes.toBytes(PRIORITY));
218 
219   /** Relative priority of the table used for rpc scheduling */
220   private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS;
221 
222   /*
223    *  The below are ugly but better than creating them each time till we
224    *  replace booleans being saved as Strings with plain booleans.  Need a
225    *  migration script to do this.  TODO.
226    */
227   private static final ImmutableBytesWritable FALSE =
228     new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
229 
230   private static final ImmutableBytesWritable TRUE =
231     new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
232 
233   private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
234 
235   /**
236    * Constant that denotes the default value of the readOnly flag, which is false
237    */
238   public static final boolean DEFAULT_READONLY = false;
239 
240   /**
241    * Constant that denotes whether the table is compaction enabled by default
242    */
243   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
244 
245   /**
246    * Constant that denotes whether the table is normalized by default.
247    */
248   public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
249 
250   /**
251    * Constant that denotes the maximum default size of the memstore after which
252    * the contents are flushed to the store files
253    */
254   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
255 
256   public static final int DEFAULT_REGION_REPLICATION = 1;
257 
258   public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true;
259 
260   private final static Map<String, String> DEFAULT_VALUES
261     = new HashMap<String, String>();
262   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
263     = new HashSet<ImmutableBytesWritable>();
264   static {
265     DEFAULT_VALUES.put(MAX_FILESIZE,
266         String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
267     DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
268     DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
269         String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
270     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
271         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
272     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
273     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
274     DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
275     DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
276     for (String s : DEFAULT_VALUES.keySet()) {
277       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
278     }
279     RESERVED_KEYWORDS.add(IS_ROOT_KEY);
280     RESERVED_KEYWORDS.add(IS_META_KEY);
281   }
282 
283   /**
284    * Cache of whether this is a meta table or not.
285    */
286   private volatile Boolean meta = null;
287   /**
288    * Cache of whether this is root table or not.
289    */
290   private volatile Boolean root = null;
291 
292   /**
293    * Durability setting for the table
294    */
295   private Durability durability = null;
296 
297   /**
298    * Maps column family name to the respective HColumnDescriptors
299    */
300   private final Map<byte [], HColumnDescriptor> families =
301     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
302 
303   /**
304    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
305    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
306    */
307   @InterfaceAudience.Private
308   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
309     setName(name);
310     for(HColumnDescriptor descriptor : families) {
311       this.families.put(descriptor.getName(), descriptor);
312     }
313   }
314 
315   /**
316    * <em> INTERNAL </em> Protected constructor used internally to create table descriptors for
317    * catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
318    */
319   protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
320       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
321     setName(name);
322     for(HColumnDescriptor descriptor : families) {
323       this.families.put(descriptor.getName(), descriptor);
324     }
325     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
326         values.entrySet()) {
327       setValue(entry.getKey(), entry.getValue());
328     }
329   }
330 
331   /**
332    * Default constructor which constructs an empty object.
333    * For deserializing an HTableDescriptor instance only.
334    * @deprecated As of release 0.96
335    *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
336    *             This will be removed in HBase 2.0.0.
337    *             Used by Writables and Writables are going away.
338    */
339   @Deprecated
340   public HTableDescriptor() {
341     super();
342   }
343 
344   /**
345    * Construct a table descriptor specifying a TableName object
346    * @param name Table name.
347    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
348    */
349   public HTableDescriptor(final TableName name) {
350     super();
351     setName(name);
352   }
353 
354   /**
355    * Construct a table descriptor specifying a byte array table name
356    * @param name Table name.
357    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
358    */
359   @Deprecated
360   public HTableDescriptor(final byte[] name) {
361     this(TableName.valueOf(name));
362   }
363 
364   /**
365    * Construct a table descriptor specifying a String table name
366    * @param name Table name.
367    * @see <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
368    */
369   @Deprecated
370   public HTableDescriptor(final String name) {
371     this(TableName.valueOf(name));
372   }
373 
374   /**
375    * Construct a table descriptor by cloning the descriptor passed as a parameter.
376    * <p>
377    * Makes a deep copy of the supplied descriptor.
378    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
379    * @param desc The descriptor.
380    */
381   public HTableDescriptor(final HTableDescriptor desc) {
382     super();
383     setName(desc.name);
384     setMetaFlags(this.name);
385     for (HColumnDescriptor c: desc.families.values()) {
386       this.families.put(c.getName(), new HColumnDescriptor(c));
387     }
388     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
389         desc.values.entrySet()) {
390       setValue(e.getKey(), e.getValue());
391     }
392     for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
393       this.configuration.put(e.getKey(), e.getValue());
394     }
395   }
396 
397   /*
398    * Set meta flags on this table.
399    * IS_ROOT_KEY is set if it's a -ROOT- table
400    * IS_META_KEY is set if it's either a -ROOT- or a hbase:meta table
401    * Called by constructors.
402    * @param name
403    */
404   private void setMetaFlags(final TableName name) {
405     setMetaRegion(isRootRegion() ||
406       name.equals(TableName.META_TABLE_NAME));
407   }
408 
409   /**
410    * Check if the descriptor represents a <code> -ROOT- </code> region.
411    *
412    * @return true if this is a <code> -ROOT- </code> region
413    */
414   public boolean isRootRegion() {
415     if (this.root == null) {
416       this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
417     }
418     return this.root.booleanValue();
419   }
420 
421   /**
422    * <em> INTERNAL </em> Used to denote if the current table represents
423    * <code> -ROOT- </code> region. This is used internally by the
424    * HTableDescriptor constructors
425    *
426    * @param isRoot true if this is the <code> -ROOT- </code> region
427    */
428   protected void setRootRegion(boolean isRoot) {
429     // TODO: Make the value a boolean rather than String of boolean.
430     setValue(IS_ROOT_KEY, isRoot ? TRUE : FALSE);
431   }
432 
433   /**
434    * Checks if this table is <code> hbase:meta </code>
435    * region.
436    *
437    * @return true if this table is <code> hbase:meta </code>
438    * region
439    */
440   public boolean isMetaRegion() {
441     if (this.meta == null) {
442       this.meta = calculateIsMetaRegion();
443     }
444     return this.meta.booleanValue();
445   }
446 
447   private synchronized Boolean calculateIsMetaRegion() {
448     byte [] value = getValue(IS_META_KEY);
449     return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
450   }
451 
452   private boolean isSomething(final ImmutableBytesWritable key,
453       final boolean valueIfNull) {
454     byte [] value = getValue(key);
455     if (value != null) {
456       return Boolean.valueOf(Bytes.toString(value));
457     }
458     return valueIfNull;
459   }
460 
461   /**
462    * <em> INTERNAL </em> Used to denote if the current table represents
463    * <code> -ROOT- </code> or <code> hbase:meta </code> region. This is used
464    * internally by the HTableDescriptor constructors
465    *
466    * @param isMeta true if it's either <code> -ROOT- </code> or
467    * <code> hbase:meta </code> region
468    */
469   protected void setMetaRegion(boolean isMeta) {
470     setValue(IS_META_KEY, isMeta? TRUE: FALSE);
471   }
472 
473   /**
474    * Checks if the table is a <code>hbase:meta</code> table
475    *
476    * @return true if table is <code> hbase:meta </code> region.
477    */
478   public boolean isMetaTable() {
479     return isMetaRegion() && !isRootRegion();
480   }
481 
482   /**
483    * Getter for accessing the metadata associated with the key
484    *
485    * @param key The key.
486    * @return The value.
487    * @see #values
488    */
489   public byte[] getValue(byte[] key) {
490     return getValue(new ImmutableBytesWritable(key));
491   }
492 
493   private byte[] getValue(final ImmutableBytesWritable key) {
494     ImmutableBytesWritable ibw = values.get(key);
495     if (ibw == null)
496       return null;
497     return ibw.get();
498   }
499 
500   /**
501    * Getter for accessing the metadata associated with the key
502    *
503    * @param key The key.
504    * @return The value.
505    * @see #values
506    */
507   public String getValue(String key) {
508     byte[] value = getValue(Bytes.toBytes(key));
509     if (value == null)
510       return null;
511     return Bytes.toString(value);
512   }
513 
514   /**
515    * Getter for fetching an unmodifiable {@link #values} map.
516    *
517    * @return unmodifiable map {@link #values}.
518    * @see #values
519    */
520   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
521     // shallow pointer copy
522     return Collections.unmodifiableMap(values);
523   }
524 
525   /**
526    * Setter for storing metadata as a (key, value) pair in {@link #values} map
527    *
528    * @param key The key.
529    * @param value The value.
530    * @see #values
531    */
532   public HTableDescriptor setValue(byte[] key, byte[] value) {
533     setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
534     return this;
535   }
536 
537   /*
538    * @param key The key.
539    * @param value The value.
540    */
541   private HTableDescriptor setValue(final ImmutableBytesWritable key,
542       final String value) {
543     setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
544     return this;
545   }
546 
547   /**
548    * Setter for storing metadata as a (key, value) pair in {@link #values} map
549    *
550    * @param key The key.
551    * @param value The value.
552    */
553   public HTableDescriptor setValue(final ImmutableBytesWritable key,
554       final ImmutableBytesWritable value) {
555     if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
556       boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
557       LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
558           "use " + DURABILITY + " instead");
559       setDurability(isDeferredFlush ? Durability.ASYNC_WAL : DEFAULT_DURABLITY);
560       return this;
561     }
562     values.put(key, value);
563     return this;
564   }
565 
566   /**
567    * Setter for storing metadata as a (key, value) pair in {@link #values} map
568    *
569    * @param key The key.
570    * @param value The value.
571    * @see #values
572    */
573   public HTableDescriptor setValue(String key, String value) {
574     if (value == null) {
575       remove(key);
576     } else {
577       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
578     }
579     return this;
580   }
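      // An illustrative sketch of round-tripping a metadata entry through the
      // {@link #values} map (the value "alice" is an arbitrary example; OWNER is
      // the key defined above):
      //
      //   htd.setValue("OWNER", "alice");
      //   String owner = htd.getValue("OWNER");   // "alice"
      //   htd.setValue("OWNER", null);            // a null value removes the entry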
581 
582   /**
583    * Remove metadata represented by the key from the {@link #values} map
584    *
585    * @param key Key whose key and value we're to remove from HTableDescriptor
586    * parameters.
587    */
588   public void remove(final String key) {
589     remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
590   }
591 
592   /**
593    * Remove metadata represented by the key from the {@link #values} map
594    *
595    * @param key Key whose key and value we're to remove from HTableDescriptor
596    * parameters.
597    */
598   public void remove(ImmutableBytesWritable key) {
599     values.remove(key);
600   }
601 
602   /**
603    * Remove metadata represented by the key from the {@link #values} map
604    *
605    * @param key Key whose key and value we're to remove from HTableDescriptor
606    * parameters.
607    */
608   public void remove(final byte [] key) {
609     remove(new ImmutableBytesWritable(key));
610   }
611 
612   /**
613    * Check if the readOnly flag of the table is set. If the readOnly flag is
614    * set then the contents of the table can only be read, not modified.
615    *
616    * @return true if all columns in the table should be read only
617    */
618   public boolean isReadOnly() {
619     return isSomething(READONLY_KEY, DEFAULT_READONLY);
620   }
621 
622   /**
623    * Setting the table as read only sets all the columns in the table as read
624    * only. By default all tables are modifiable, but if the readOnly flag is
625    * set to true then the contents of the table can only be read but not modified.
626    *
627    * @param readOnly True if all of the columns in the table should be read
628    * only.
629    */
630   public HTableDescriptor setReadOnly(final boolean readOnly) {
631     return setValue(READONLY_KEY, readOnly? TRUE: FALSE);
632   }
633 
634   /**
635    * Check if the compaction enable flag of the table is true. If the flag is
636    * false, no minor/major compactions will be performed for this table.
637    *
638    * @return true if table compaction enabled
639    */
640   public boolean isCompactionEnabled() {
641     return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED);
642   }
643 
644   /**
645    * Setting the table compaction enable flag.
646    *
647    * @param isEnable True to enable compaction.
648    */
649   public HTableDescriptor setCompactionEnabled(final boolean isEnable) {
650     setValue(COMPACTION_ENABLED_KEY, isEnable ? TRUE : FALSE);
651     return this;
652   }
653 
654   /**
655    * Check if the normalization enable flag of the table is true. If the flag
656    * is false, the region normalizer will not attempt to normalize this table.
657    *
658    * @return true if region normalization is enabled for this table
659    */
660   public boolean isNormalizationEnabled() {
661     return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED);
662   }
663 
664   /**
665    * Setting the table normalization enable flag.
666    *
667    * @param isEnable True to enable normalization.
668    */
669   public HTableDescriptor setNormalizationEnabled(final boolean isEnable) {
670     setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE);
671     return this;
672   }
673 
674   /**
675    * Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
676    * @param durability enum value
677    */
678   public HTableDescriptor setDurability(Durability durability) {
679     this.durability = durability;
680     setValue(DURABILITY_KEY, durability.name());
681     return this;
682   }
683 
684   /**
685    * Returns the durability setting for the table.
686    * @return durability setting for the table.
687    */
688   public Durability getDurability() {
689     if (this.durability == null) {
690       byte[] durabilityValue = getValue(DURABILITY_KEY);
691       if (durabilityValue == null) {
692         this.durability = DEFAULT_DURABLITY;
693       } else {
694         try {
695           this.durability = Durability.valueOf(Bytes.toString(durabilityValue));
696         } catch (IllegalArgumentException ex) {
697           LOG.warn("Received " + ex + " because Durability value for HTableDescriptor"
698             + " is not known. Durability:" + Bytes.toString(durabilityValue));
699           this.durability = DEFAULT_DURABLITY;
700         }
701       }
702     }
703     return this.durability;
704   }
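      // An illustrative sketch (the choice of ASYNC_WAL is arbitrary): with this
      // setting, WAL edits for the table are synced to the filesystem asynchronously.
      //
      //   htd.setDurability(Durability.ASYNC_WAL);
      //   Durability d = htd.getDurability();   // ASYNC_WAL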
705 
706   /**
707    * Get the name of the table
708    *
709    * @return TableName
710    */
711   public TableName getTableName() {
712     return name;
713   }
714 
715   /**
716    * Get the name of the table as a byte array.
717    *
718    * @return name of table
719    * @deprecated Use {@link #getTableName()} instead
720    */
721   @Deprecated
722   public byte[] getName() {
723     return name.getName();
724   }
725 
726   /**
727    * Get the name of the table as a String
728    *
729    * @return name of table as a String
730    */
731   public String getNameAsString() {
732     return name.getNameAsString();
733   }
734 
735   /**
736    * This sets the class associated with the region split policy which
737    * determines when a region split should occur.  The class used by
738    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
739    * @param clazz the class name
740    */
741   public HTableDescriptor setRegionSplitPolicyClassName(String clazz) {
742     setValue(SPLIT_POLICY, clazz);
743     return this;
744   }
745 
746   /**
747    * This gets the class associated with the region split policy which
748    * determines when a region split should occur.  The class used by
749    * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
750    *
751    * @return the class name of the region split policy for this table.
752    * If this returns null, the default split policy is used.
753    */
754    public String getRegionSplitPolicyClassName() {
755     return getValue(SPLIT_POLICY);
756   }
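      // An illustrative sketch: pin this table to the constant-size split policy
      // that ships with HBase (any RegionSplitPolicy subclass on the classpath
      // could be named here instead).
      //
      //   htd.setRegionSplitPolicyClassName(
      //       "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");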
757 
758   /**
759    * Set the name of the table.
760    *
761    * @param name name of table
762    */
763   @Deprecated
764   public HTableDescriptor setName(byte[] name) {
765     setName(TableName.valueOf(name));
766     return this;
767   }
768 
769   @Deprecated
770   public HTableDescriptor setName(TableName name) {
771     this.name = name;
772     setMetaFlags(this.name);
773     return this;
774   }
775 
776   /**
777    * Returns the maximum size up to which a region can grow, after which a region
778    * split is triggered. The region size is represented by the size of the biggest
779    * store file in that region.
780    *
781    * @return max hregion size for table, -1 if not set.
782    *
783    * @see #setMaxFileSize(long)
784    */
785   public long getMaxFileSize() {
786     byte [] value = getValue(MAX_FILESIZE_KEY);
787     if (value != null) {
788       return Long.parseLong(Bytes.toString(value));
789     }
790     return -1;
791   }
792 
793   /**
794    * Sets the maximum size up to which a region can grow, after which a region
795    * split is triggered. The region size is represented by the size of the biggest
796    * store file in that region, i.e. if the biggest store file grows beyond
797    * maxFileSize, then a region split is triggered. This defaults to
798    * {@link HConstants#DEFAULT_MAX_FILE_SIZE}.
799    * <p>
800    * This is not an absolute value and might vary. If a single row exceeds
801    * maxFileSize, then the store file size will be greater than maxFileSize since
802    * a single row cannot be split across multiple regions.
803    * </p>
804    *
805    * @param maxFileSize The maximum file size that a store file can grow to
806    * before a split is triggered.
807    */
808   public HTableDescriptor setMaxFileSize(long maxFileSize) {
809     setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
810     return this;
811   }
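      // An illustrative sketch (the 10 GB figure is an arbitrary example): split a
      // region once its biggest store file exceeds roughly 10 GB.
      //
      //   htd.setMaxFileSize(10L * 1024 * 1024 * 1024);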
812 
813   /**
814    * Returns the size of the memstore after which a flush to filesystem is triggered.
815    *
816    * @return memory cache flush size for each hregion, -1 if not set.
817    *
818    * @see #setMemStoreFlushSize(long)
819    */
820   public long getMemStoreFlushSize() {
821     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
822     if (value != null) {
823       return Long.parseLong(Bytes.toString(value));
824     }
825     return -1;
826   }
827 
828   /**
829    * Represents the maximum size of the memstore after which the contents of the
830    * memstore are flushed to the filesystem. This defaults to 128 MB ({@link #DEFAULT_MEMSTORE_FLUSH_SIZE}).
831    *
832    * @param memstoreFlushSize memory cache flush size for each hregion
833    */
834   public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
835     setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
836     return this;
837   }
838 
839   /**
840    * This sets the class associated with the flush policy which determines which stores
841    * need to be flushed when flushing a region. The class used by default is defined in
842    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
843    * @param clazz the class name
844    */
845   public HTableDescriptor setFlushPolicyClassName(String clazz) {
846     setValue(FLUSH_POLICY, clazz);
847     return this;
848   }
849 
850   /**
851    * This gets the class associated with the flush policy which determines which stores need to be
852    * flushed when flushing a region. The class used by default is defined in
853    * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy}
854    * @return the class name of the flush policy for this table. If this returns null, the default
855    *         flush policy is used.
856    */
857   public String getFlushPolicyClassName() {
858     return getValue(FLUSH_POLICY);
859   }
860 
861   /**
862    * Adds a column family.
863    * To update an existing family, use {@link #modifyFamily(HColumnDescriptor)} instead.
864    * @param family HColumnDescriptor of family to add.
865    */
866   public HTableDescriptor addFamily(final HColumnDescriptor family) {
867     if (family.getName() == null || family.getName().length <= 0) {
868       throw new IllegalArgumentException("Family name cannot be null or empty");
869     }
870     if (hasFamily(family.getName())) {
871       throw new IllegalArgumentException("Family '" +
872         family.getNameAsString() + "' already exists so cannot be added");
873     }
874     this.families.put(family.getName(), family);
875     return this;
876   }
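      // An illustrative sketch (the family name "cf" and max-versions value are
      // arbitrary examples): add a family once, then use modifyFamily to change it;
      // calling addFamily again for "cf" would throw IllegalArgumentException.
      //
      //   htd.addFamily(new HColumnDescriptor("cf"));
      //   HColumnDescriptor cf = new HColumnDescriptor("cf");
      //   cf.setMaxVersions(3);
      //   htd.modifyFamily(cf);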
877 
878   /**
879    * Modifies the existing column family.
880    * @param family HColumnDescriptor of family to update
881    * @return this (for chained invocation)
882    */
883   public HTableDescriptor modifyFamily(final HColumnDescriptor family) {
884     if (family.getName() == null || family.getName().length <= 0) {
885       throw new IllegalArgumentException("Family name cannot be null or empty");
886     }
887     if (!hasFamily(family.getName())) {
888       throw new IllegalArgumentException("Column family '" + family.getNameAsString()
889         + "' does not exist");
890     }
891     this.families.put(family.getName(), family);
892     return this;
893   }
894 
895   /**
896    * Checks to see if this table contains the given column family
897    * @param familyName Family name or column name.
898    * @return true if the table contains the specified family name
899    */
900   public boolean hasFamily(final byte [] familyName) {
901     return families.containsKey(familyName);
902   }
903 
904   /**
905    * @return Name of this table and then a map of all of the column family
906    * descriptors.
907    * @see #getNameAsString()
908    */
909   @Override
910   public String toString() {
911     StringBuilder s = new StringBuilder();
912     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
913     s.append(getValues(true));
914     for (HColumnDescriptor f : families.values()) {
915       s.append(", ").append(f);
916     }
917     return s.toString();
918   }
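      // For illustration only, the rendered form resembles the shell's describe
      // output; with the assumed names from the sketches above it might look like:
      //   't1', {TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', CONFIGURATION =>
      //   {'hbase.hstore.compaction.min' => '5'}}}, {NAME => 'cf', ...}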
919 
920   /**
921    * @return Name of this table and then a map of all of the column family
922    * descriptors (with only the non-default column family attributes)
923    */
924   public String toStringCustomizedValues() {
925     StringBuilder s = new StringBuilder();
926     s.append('\'').append(Bytes.toString(name.getName())).append('\'');
927     s.append(getValues(false));
928     for(HColumnDescriptor hcd : families.values()) {
929       s.append(", ").append(hcd.toStringCustomizedValues());
930     }
931     return s.toString();
932   }
933 
934   /**
935    * @return map of all table attributes formatted into string.
936    */
937   public String toStringTableAttributes() {
938    return getValues(true).toString();
939   }
940 
941   private StringBuilder getValues(boolean printDefaults) {
942     StringBuilder s = new StringBuilder();
943 
944     // step 1: set partitioning and pruning
945     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
946     Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
947     for (ImmutableBytesWritable k : values.keySet()) {
948       if (k == null || k.get() == null) continue;
949       String key = Bytes.toString(k.get());
950       // in this section, print out reserved keywords + coprocessor info
951       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
952         userKeys.add(k);
953         continue;
954       }
955       // only print out IS_ROOT/IS_META if true
956       String value = Bytes.toString(values.get(k).get());
957       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
958         if (Boolean.valueOf(value) == false) continue;
959       }
960       // see if a reserved key is a default value. may not want to print it out
961       if (printDefaults
962           || !DEFAULT_VALUES.containsKey(key)
963           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
964         reservedKeys.add(k);
965       }
966     }
967 
968     // early exit optimization
969     boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
970     if (!hasAttributes && configuration.isEmpty()) return s;
971 
972     s.append(", {");
973     // step 2: printing attributes
974     if (hasAttributes) {
975       s.append("TABLE_ATTRIBUTES => {");
976 
977       // print all reserved keys first
978       boolean printCommaForAttr = false;
979       for (ImmutableBytesWritable k : reservedKeys) {
980         String key = Bytes.toString(k.get());
981         String value = Bytes.toStringBinary(values.get(k).get());
982         if (printCommaForAttr) s.append(", ");
983         printCommaForAttr = true;
984         s.append(key);
985         s.append(" => ");
986         s.append('\'').append(value).append('\'');
987       }
988 
989       if (!userKeys.isEmpty()) {
990         // print all non-reserved, advanced config keys as a separate subset
991         if (printCommaForAttr) s.append(", ");
992         printCommaForAttr = true;
993         s.append(HConstants.METADATA).append(" => ");
994         s.append("{");
995         boolean printCommaForCfg = false;
996         for (ImmutableBytesWritable k : userKeys) {
997           String key = Bytes.toString(k.get());
998           String value = Bytes.toStringBinary(values.get(k).get());
999           if (printCommaForCfg) s.append(", ");
1000           printCommaForCfg = true;
1001           s.append('\'').append(key).append('\'');
1002           s.append(" => ");
1003           s.append('\'').append(value).append('\'');
1004         }
1005         s.append("}");
1006       }
1007     }
1008 
1009     // step 3: printing all configuration:
1010     if (!configuration.isEmpty()) {
1011       if (hasAttributes) {
1012         s.append(", ");
1013       }
1014       s.append(HConstants.CONFIGURATION).append(" => ");
1015       s.append('{');
1016       boolean printCommaForConfig = false;
1017       for (Map.Entry<String, String> e : configuration.entrySet()) {
1018         if (printCommaForConfig) s.append(", ");
1019         printCommaForConfig = true;
1020         s.append('\'').append(e.getKey()).append('\'');
1021         s.append(" => ");
1022         s.append('\'').append(e.getValue()).append('\'');
1023       }
1024       s.append("}");
1025     }
1026     s.append("}"); // end METHOD
1027     return s;
1028   }
1029 
1030   /**
1031    * Compare the contents of the descriptor with another one passed as a parameter.
1032   * Checks if the obj passed is an instance of HTableDescriptor; if so, the
1033   * contents of the descriptors are compared.
1034   *
1035   * @return true if the contents of the two descriptors exactly match
1036    *
1037    * @see java.lang.Object#equals(java.lang.Object)
1038    */
1039   @Override
1040   public boolean equals(Object obj) {
1041     if (this == obj) {
1042       return true;
1043     }
1044     if (obj == null) {
1045       return false;
1046     }
1047     if (!(obj instanceof HTableDescriptor)) {
1048       return false;
1049     }
1050     return compareTo((HTableDescriptor)obj) == 0;
1051   }
1052 
1053 
1054   /**
1055    * @see java.lang.Object#hashCode()
1056    */
1057   @Override
1058   public int hashCode() {
1059     int result = this.name.hashCode();
1060     result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
1061     if (this.families != null && this.families.size() > 0) {
1062       for (HColumnDescriptor e: this.families.values()) {
1063         result ^= e.hashCode();
1064       }
1065     }
1066     result ^= values.hashCode();
1067     result ^= configuration.hashCode();
1068     return result;
1069   }
1070 
1071   /**
1072    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1073    * and is used for de-serialization of the HTableDescriptor over RPC
1074    * @deprecated Writables are going away.  Use pb {@link #parseFrom(byte[])} instead.
1075    */
1076   @Deprecated
1077   @Override
1078   public void readFields(DataInput in) throws IOException {
1079     int version = in.readInt();
1080     if (version < 3)
1081       throw new IOException("versions < 3 are not supported (and never existed!?)");
1082     // version 3+
1083     name = TableName.valueOf(Bytes.readByteArray(in));
1084     setRootRegion(in.readBoolean());
1085     setMetaRegion(in.readBoolean());
1086     values.clear();
1087     configuration.clear();
1088     int numVals = in.readInt();
1089     for (int i = 0; i < numVals; i++) {
1090       ImmutableBytesWritable key = new ImmutableBytesWritable();
1091       ImmutableBytesWritable value = new ImmutableBytesWritable();
1092       key.readFields(in);
1093       value.readFields(in);
1094       setValue(key, value);
1095     }
1096     families.clear();
1097     int numFamilies = in.readInt();
1098     for (int i = 0; i < numFamilies; i++) {
1099       HColumnDescriptor c = new HColumnDescriptor();
1100       c.readFields(in);
1101       families.put(c.getName(), c);
1102     }
1103     if (version >= 7) {
1104       int numConfigs = in.readInt();
1105       for (int i = 0; i < numConfigs; i++) {
1106         ImmutableBytesWritable key = new ImmutableBytesWritable();
1107         ImmutableBytesWritable value = new ImmutableBytesWritable();
1108         key.readFields(in);
1109         value.readFields(in);
1110         configuration.put(
1111           Bytes.toString(key.get(), key.getOffset(), key.getLength()),
1112           Bytes.toString(value.get(), value.getOffset(), value.getLength()));
1113       }
1114     }
1115   }
1116 
1117   /**
1118    * <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
1119    * and is used for serialization of the HTableDescriptor over RPC
1120    * @deprecated Writables are going away.
1121    * Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
1122    */
1123   @Deprecated
1124   @Override
1125   public void write(DataOutput out) throws IOException {
1126     out.writeInt(TABLE_DESCRIPTOR_VERSION);
1127     Bytes.writeByteArray(out, name.toBytes());
1128     out.writeBoolean(isRootRegion());
1129     out.writeBoolean(isMetaRegion());
1130     out.writeInt(values.size());
1131     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1132         values.entrySet()) {
1133       e.getKey().write(out);
1134       e.getValue().write(out);
1135     }
1136     out.writeInt(families.size());
1137     for(Iterator<HColumnDescriptor> it = families.values().iterator();
1138         it.hasNext(); ) {
1139       HColumnDescriptor family = it.next();
1140       family.write(out);
1141     }
1142     out.writeInt(configuration.size());
1143     for (Map.Entry<String, String> e : configuration.entrySet()) {
1144       new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
1145       new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
1146     }
1147   }
1148 
1149   // Comparable
1150 
1151   /**
1152    * Compares the descriptor with another descriptor which is passed as a parameter.
1153    * This compares the content of the two descriptors and not the reference.
1154    *
1155   * @return 0 if the contents of the descriptors match exactly; a non-zero
1156   *         value if there is a mismatch in the contents
1157    */
1158   @Override
1159   public int compareTo(final HTableDescriptor other) {
1160     int result = this.name.compareTo(other.name);
1161     if (result == 0) {
1162       result = families.size() - other.families.size();
1163     }
1164     if (result == 0 && families.size() != other.families.size()) {
1165       result = Integer.valueOf(families.size()).compareTo(
1166           Integer.valueOf(other.families.size()));
1167     }
1168     if (result == 0) {
1169       for (Iterator<HColumnDescriptor> it = families.values().iterator(),
1170           it2 = other.families.values().iterator(); it.hasNext(); ) {
1171         result = it.next().compareTo(it2.next());
1172         if (result != 0) {
1173           break;
1174         }
1175       }
1176     }
1177     if (result == 0) {
1178       // punt on comparison for ordering, just calculate difference
1179       result = this.values.hashCode() - other.values.hashCode();
1180       if (result < 0)
1181         result = -1;
1182       else if (result > 0)
1183         result = 1;
1184     }
1185     if (result == 0) {
1186       result = this.configuration.hashCode() - other.configuration.hashCode();
1187       if (result < 0)
1188         result = -1;
1189       else if (result > 0)
1190         result = 1;
1191     }
1192     return result;
1193   }
1194 
1195   /**
1196    * Returns an unmodifiable collection of all the {@link HColumnDescriptor}
1197    * of all the column families of the table.
1198    *
1199    * @return Immutable collection of {@link HColumnDescriptor} of all the
1200    * column families.
1201    */
1202   public Collection<HColumnDescriptor> getFamilies() {
1203     return Collections.unmodifiableCollection(this.families.values());
1204   }
1205 
1206   /**
1207    * Returns the configured replicas per region
1208    */
1209   public int getRegionReplication() {
1210     return getIntValue(REGION_REPLICATION_KEY, DEFAULT_REGION_REPLICATION);
1211   }
1212 
1213   private int getIntValue(ImmutableBytesWritable key, int defaultVal) {
1214     byte[] val = getValue(key);
1215     if (val == null || val.length == 0) {
1216       return defaultVal;
1217     }
1218     return Integer.parseInt(Bytes.toString(val));
1219   }
1220 
1221   /**
1222    * Sets the number of replicas per region.
1223    * @param regionReplication the replication factor per region
1224    */
1225   public HTableDescriptor setRegionReplication(int regionReplication) {
1226     setValue(REGION_REPLICATION_KEY,
1227         new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
1228     return this;
1229   }
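      // An illustrative sketch (the replica count 3 is an arbitrary example): one
      // primary plus two read replicas per region; leaving memstore replication on
      // lets replicas serve timeline-consistent reads before a flush.
      //
      //   htd.setRegionReplication(3);
      //   htd.setRegionMemstoreReplication(true);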
1230 
1231   /**
1232    * @return true if the read-replicas memstore replication is enabled.
1233    */
1234   public boolean hasRegionMemstoreReplication() {
1235     return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION);
1236   }
1237 
1238   /**
1239   * Enable or disable the memstore replication from the primary region to the replicas.
1240   * The replication will be used only for meta operations (e.g. flush, compaction, ...)
1241   *
1242   * @param memstoreReplication true if the new data written to the primary region
1243   *                                 should be replicated.
1244   *                            false if the secondaries can tolerate seeing new
1245   *                                  data only when the primary flushes the memstore.
1246    */
1247   public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) {
1248     setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE);
1249     // If the memstore replication is setup, we do not have to wait for observing a flush event
1250     // from primary before starting to serve reads, because gaps from replication is not applicable
1251     setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
1252       Boolean.toString(memstoreReplication));
1253     return this;
1254   }
1255 
1256   public HTableDescriptor setPriority(int priority) {
1257     setValue(PRIORITY_KEY, Integer.toString(priority));
1258     return this;
1259   }
1260 
1261   public int getPriority() {
1262     return getIntValue(PRIORITY_KEY, DEFAULT_PRIORITY);
1263   }
1264 
1265   /**
1266    * Returns all the column family names of the current table. The map of
1267    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
1268   * This returns all the keys of the family map, which represent the column
1269   * family names of the table.
1270    *
1271    * @return Immutable sorted set of the keys of the families.
1272    */
1273   public Set<byte[]> getFamiliesKeys() {
1274     return Collections.unmodifiableSet(this.families.keySet());
1275   }
1276 
1277   /**
1278    * Returns an array of all the {@link HColumnDescriptor} of the column families
1279    * of the table.
1280    *
1281    * @return Array of all the HColumnDescriptors of the current table
1282    *
1283    * @see #getFamilies()
1284    */
1285   public HColumnDescriptor[] getColumnFamilies() {
1286     Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
1287     return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
1288   }
1289 
1290 
1291   /**
1292    * Returns the HColumnDescriptor for a specific column family with name as
1293    * specified by the parameter column.
1294    *
1295    * @param column Column family name
1296   * @return Column descriptor for the passed family name, or null if no such
1297   * family exists.
1298    */
1299   public HColumnDescriptor getFamily(final byte [] column) {
1300     return this.families.get(column);
1301   }
1302 
1303 
1304   /**
1305    * Removes the HColumnDescriptor with name specified by the parameter column
1306    * from the table descriptor
1307    *
1308    * @param column Name of the column family to be removed.
1309   * @return Column descriptor of the removed family, or null if the family
1310   * did not exist.
1311    */
1312   public HColumnDescriptor removeFamily(final byte [] column) {
1313     return this.families.remove(column);
1314   }
1315 
1316 
1317   /**
1318    * Add a table coprocessor to this table. The coprocessor
1319    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1320    * or Endpoint.
1321    * It won't check if the class can be loaded or not.
1322    * Whether a coprocessor is loadable or not will be determined when
1323    * a region is opened.
1324    * @param className Full class name.
1325    * @throws IOException
1326    */
1327   public HTableDescriptor addCoprocessor(String className) throws IOException {
1328     addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
1329     return this;
1330   }
1331 
1332 
1333   /**
1334    * Add a table coprocessor to this table. The coprocessor
1335    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1336    * or Endpoint.
1337    * It won't check if the class can be loaded or not.
1338    * Whether a coprocessor is loadable or not will be determined when
1339    * a region is opened.
1340    * @param jarFilePath Path of the jar file. If it's null, the class will be
1341    * loaded from default classloader.
1342    * @param className Full class name.
1343    * @param priority Priority
1344    * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
1345    * @throws IOException
1346    */
1347   public HTableDescriptor addCoprocessor(String className, Path jarFilePath,
1348                              int priority, final Map<String, String> kvs)
1349   throws IOException {
1350     if (hasCoprocessor(className)) {
1351       throw new IOException("Coprocessor " + className + " already exists.");
1352     }
1353     // validate parameter kvs
1354     StringBuilder kvString = new StringBuilder();
1355     if (kvs != null) {
1356       for (Map.Entry<String, String> e: kvs.entrySet()) {
1357         if (!e.getKey().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
1358           throw new IOException("Illegal parameter key = " + e.getKey());
1359         }
1360         if (!e.getValue().matches(HConstants.CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
1361           throw new IOException("Illegal parameter (" + e.getKey() +
1362               ") value = " + e.getValue());
1363         }
1364         if (kvString.length() != 0) {
1365           kvString.append(',');
1366         }
1367         kvString.append(e.getKey());
1368         kvString.append('=');
1369         kvString.append(e.getValue());
1370       }
1371     }
1372 
1373     // generate a coprocessor key
1374     int maxCoprocessorNumber = 0;
1375     Matcher keyMatcher;
1376     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1377         this.values.entrySet()) {
1378       keyMatcher =
1379           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1380               Bytes.toString(e.getKey().get()));
1381       if (!keyMatcher.matches()) {
1382         continue;
1383       }
1384       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
1385           maxCoprocessorNumber);
1386     }
1387     maxCoprocessorNumber++;
1388 
1389     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1390     String value = ((jarFilePath == null)? "" : jarFilePath.toString()) +
1391         "|" + className + "|" + Integer.toString(priority) + "|" +
1392         kvString.toString();
1393     setValue(key, value);
1394     return this;
1395   }
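      // An illustrative sketch (the class name, jar path, and argument are
      // hypothetical): register a RegionObserver from a specific jar with one
      // key-value parameter.
      //
      //   Map<String, String> args = new HashMap<String, String>();
      //   args.put("arg1", "1");
      //   htd.addCoprocessor("org.example.MyRegionObserver",
      //       new Path("hdfs:///cp/my-coprocessor.jar"), Coprocessor.PRIORITY_USER, args);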
1396 
1397   /**
1398    * Add a table coprocessor to this table. The coprocessor
1399    * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
1400    * or Endpoint.
1401    * It won't check if the class can be loaded or not.
1402    * Whether a coprocessor is loadable or not will be determined when
1403    * a region is opened.
1404    * @param specStr The Coprocessor specification all in one String, formatted so it matches
1405    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1406    * @throws IOException
1407    */
1408   public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
1409     String className = getCoprocessorClassNameFromSpecStr(specStr);
1410     if (className == null) {
1411       throw new IllegalArgumentException("Format does not match " +
1412         HConstants.CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr);
1413     }
1414     checkHasCoprocessor(className);
1415     return addCoprocessorToMap(specStr);
1416   }
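      // The spec string packs the same fields the four-argument addCoprocessor
      // writes, i.e. "<jar path>|<class name>|<priority>|<key=value,...>". A
      // hypothetical example:
      //
      //   htd.addCoprocessorWithSpec(
      //       "hdfs:///cp/my-coprocessor.jar|org.example.MyRegionObserver|1073741823|arg1=1");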
1417 
1418   /**
1419    * Add coprocessor to values Map
1420    * @param specStr The Coprocessor specification all in one String, formatted so it matches
1421    * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1422    * @return Returns <code>this</code>
1423    */
1424   private HTableDescriptor addCoprocessorToMap(final String specStr) {
1425     if (specStr == null) return this;
1426     // generate a coprocessor key
1427     int maxCoprocessorNumber = 0;
1428     Matcher keyMatcher;
1429     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1430         this.values.entrySet()) {
1431       keyMatcher =
1432           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1433               Bytes.toString(e.getKey().get()));
1434       if (!keyMatcher.matches()) {
1435         continue;
1436       }
1437       maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber);
1438     }
1439     maxCoprocessorNumber++;
1440     String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
1441     this.values.put(new ImmutableBytesWritable(Bytes.toBytes(key)),
1442       new ImmutableBytesWritable(Bytes.toBytes(specStr)));
1443     return this;
1444   }
1445 
1446   private void checkHasCoprocessor(final String className) throws IOException {
1447     if (hasCoprocessor(className)) {
1448       throw new IOException("Coprocessor " + className + " already exists.");
1449     }
1450   }
1451 
1452   /**
1453    * @param spec String formatted as per {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
1454    * @return Class name parsed from the passed <code>spec</code>, or null if it does not match the pattern
1455    */
1456   private static String getCoprocessorClassNameFromSpecStr(final String spec) {
1457     Matcher matcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
1458     // Classname is the 2nd field
1459     return matcher != null && matcher.matches()? matcher.group(2).trim(): null;
1460   }
1461 
1462   /**
1463    * Check if the table has an attached co-processor represented by the name className
1464    *
1465    * @param className - Class name of the co-processor
1466    * @return true if the table has a co-processor with the given class name
1467    */
1468   public boolean hasCoprocessor(String className) {
1469     Matcher keyMatcher;
1470     Matcher valueMatcher;
1471     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
1472         this.values.entrySet()) {
1473       keyMatcher =
1474           HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
1475               Bytes.toString(e.getKey().get()));
1476       if (!keyMatcher.matches()) {
1477         continue;
1478       }
1479       valueMatcher =
1480         HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(
1481             Bytes.toString(e.getValue().get()));
1482       if (!valueMatcher.matches()) {
1483         continue;
1484       }
1485       // get className and compare
1486       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1487       if (clazz.equals(className.trim())) {
1488         return true;
1489       }
1490     }
1491     return false;
1492   }
1493 
1494   /**
1495    * Return the list of attached co-processors, represented by their class names
1496    *
1497    * @return The list of co-processor class names
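        * <p>A brief sketch of the coprocessor accessors (hypothetical observer class):
        * <pre>
        * htd.addCoprocessor("com.example.MyRegionObserver");
        * List&lt;String&gt; names = htd.getCoprocessors(); // contains the class name
        * htd.removeCoprocessor("com.example.MyRegionObserver");
        * </pre>
        *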
1498    */
1499   public List<String> getCoprocessors() {
1500     List<String> result = new ArrayList<String>();
1501     Matcher keyMatcher;
1502     Matcher valueMatcher;
1503     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
1504       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
1505       if (!keyMatcher.matches()) {
1506         continue;
1507       }
1508       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1509           .toString(e.getValue().get()));
1510       if (!valueMatcher.matches()) {
1511         continue;
1512       }
1513       result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
1514     }
1515     return result;
1516   }
1517 
1518   /**
1519    * Remove a coprocessor from those set on the table
1520    * @param className Class name of the co-processor
1521    */
1522   public void removeCoprocessor(String className) {
1523     ImmutableBytesWritable match = null;
1524     Matcher keyMatcher;
1525     Matcher valueMatcher;
1526     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
1527         .entrySet()) {
1528       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
1529           .getKey().get()));
1530       if (!keyMatcher.matches()) {
1531         continue;
1532       }
1533       valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
1534           .toString(e.getValue().get()));
1535       if (!valueMatcher.matches()) {
1536         continue;
1537       }
1538       // get className and compare
1539       String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
1540       // remove the CP if it is present
1541       if (clazz.equals(className.trim())) {
1542         match = e.getKey();
1543         break;
1544       }
1545     }
1546     // if we found a match, remove it
1547     if (match != null)
1548       remove(match);
1549   }
1550 
1551   /**
1552    * Returns the {@link Path} object representing the table directory under
1553    * path rootdir.
1554    *
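        * <p>For example (hypothetical paths): for table <code>ns:tbl</code> under
        * rootdir <code>/hbase</code>, this resolves to
        * <code>/hbase/data/ns/tbl</code> (HConstants.BASE_NAMESPACE_DIR is "data").
        *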
1555    * @deprecated Use FSUtils.getTableDir() instead.
1556    *
1557    * @param rootdir qualified path of HBase root directory
1558    * @param tableName name of table
1559    * @return {@link Path} for table
1560    */
1561   @Deprecated
1562   public static Path getTableDir(Path rootdir, final byte [] tableName) {
1563     // This is bad: we have to mirror code from FSUtils.getTableDir since
1564     // there is no module dependency between hbase-client and hbase-server.
1565     TableName name = TableName.valueOf(tableName);
1566     return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1567               new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
1568   }
1569 
1570   /**
1571    * Table descriptor for <code>hbase:meta</code> catalog table
1572    * @deprecated Use TableDescriptors#get(TableName.META_TABLE_NAME) or
1573    * HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead.
1574    */
1575   @Deprecated
1576   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
1577       TableName.META_TABLE_NAME,
1578       new HColumnDescriptor[] {
1579           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1580               // Ten is an arbitrary number.  Keep versions to help debugging.
1581               .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS)
1582               .setInMemory(true)
1583               .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)
1584               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1585               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1586               .setBloomFilterType(BloomType.NONE)
1587               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1588               // e.g. if using CombinedBlockCache (BucketCache).
1589               .setCacheDataInL1(true)
1590       });
1591 
1592   static {
1593     try {
1594       META_TABLEDESC.addCoprocessor(
1595           "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1596           null, Coprocessor.PRIORITY_SYSTEM, null);
1597     } catch (IOException ex) {
1598       // Failing to register the hbase:meta coprocessor is fatal; rethrow.
1599       throw new RuntimeException(ex);
1600     }
1601   }
1602 
1603   public final static String NAMESPACE_FAMILY_INFO = "info";
1604   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
1605   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
1606 
1607   /** Table descriptor for namespace table */
1608   public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
1609       TableName.NAMESPACE_TABLE_NAME,
1610       new HColumnDescriptor[] {
1611           new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
1612               // Ten is an arbitrary number.  Keep versions to help debugging.
1613               .setMaxVersions(10)
1614               .setInMemory(true)
1615               .setBlocksize(8 * 1024)
1616               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1617               // Enable cache of data blocks in L1 if more than one caching tier deployed:
1618               // e.g. if using CombinedBlockCache (BucketCache).
1619               .setCacheDataInL1(true)
1620       });
1621 
1622   @Deprecated
1623   public HTableDescriptor setOwner(User owner) {
1624     return setOwnerString(owner != null ? owner.getShortName() : null);
1625   }
1626 
1627   // used by admin.rb:alter(table_name,*args) to update owner.
1628   @Deprecated
1629   public HTableDescriptor setOwnerString(String ownerString) {
1630     if (ownerString != null) {
1631       setValue(OWNER_KEY, ownerString);
1632     } else {
1633       remove(OWNER_KEY);
1634     }
1635     return this;
1636   }
1637 
1638   @Deprecated
1639   public String getOwnerString() {
1640     if (getValue(OWNER_KEY) != null) {
1641       return Bytes.toString(getValue(OWNER_KEY));
1642     }
1643     // Note that every table should have an owner (i.e. should have OWNER_KEY set).
1644     // hbase:meta and -ROOT- should return system user as owner, not null (see
1645     // MasterFileSystem.java:bootstrap()).
1646     return null;
1647   }
1648 
1649   /**
1650    * @return This instance serialized as a pb TableSchema, prefixed with the pb magic
1651    * @see #parseFrom(byte[])
1652    */
1653   public byte [] toByteArray() {
1654     return ProtobufUtil.prependPBMagic(convert().toByteArray());
1655   }
1656 
1657   /**
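        * A round-trip sketch (assuming an existing descriptor <code>htd</code>):
        * <pre>
        * byte[] bytes = htd.toByteArray();
        * HTableDescriptor restored = HTableDescriptor.parseFrom(bytes);
        * </pre>
        *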
1658    * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
1659    * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
1660    * @throws DeserializationException
1661    * @throws IOException
1662    * @see #toByteArray()
1663    */
1664   public static HTableDescriptor parseFrom(final byte [] bytes)
1665   throws DeserializationException, IOException {
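         // No pb magic prefix: assume the old Writable serialization and
         // fall back to it for backward compatibility.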
1666     if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
1667       return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
1668     }
1669     int pblen = ProtobufUtil.lengthOfPBMagic();
1670     TableSchema.Builder builder = TableSchema.newBuilder();
1671     TableSchema ts;
1672     try {
1673       ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
1674       ts = builder.build();
1675     } catch (IOException e) {
1676       throw new DeserializationException(e);
1677     }
1678     return convert(ts);
1679   }
1680 
1681   /**
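        * A pb round-trip sketch, pairing with {@link #convert(TableSchema)}:
        * <pre>
        * TableSchema schema = htd.convert();
        * HTableDescriptor copy = HTableDescriptor.convert(schema);
        * </pre>
        *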
1682    * @return A pb TableSchema instance built from the current {@link HTableDescriptor}.
1683    */
1684   public TableSchema convert() {
1685     TableSchema.Builder builder = TableSchema.newBuilder();
1686     builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
1687     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
1688       BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
1689       aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
1690       aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));
1691       builder.addAttributes(aBuilder.build());
1692     }
1693     for (HColumnDescriptor hcd: getColumnFamilies()) {
1694       builder.addColumnFamilies(hcd.convert());
1695     }
1696     for (Map.Entry<String, String> e : this.configuration.entrySet()) {
1697       NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
1698       aBuilder.setName(e.getKey());
1699       aBuilder.setValue(e.getValue());
1700       builder.addConfiguration(aBuilder.build());
1701     }
1702     return builder.build();
1703   }
1704 
1705   /**
1706    * @param ts A pb TableSchema instance.
1707    * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
1708    */
1709   public static HTableDescriptor convert(final TableSchema ts) {
1710     List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
1711     HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
1712     int index = 0;
1713     for (ColumnFamilySchema cfs: list) {
1714       hcds[index++] = HColumnDescriptor.convert(cfs);
1715     }
1716     HTableDescriptor htd = new HTableDescriptor(
1717         ProtobufUtil.toTableName(ts.getTableName()),
1718         hcds);
1719     for (BytesBytesPair a: ts.getAttributesList()) {
1720       htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
1721     }
1722     for (NameStringPair a: ts.getConfigurationList()) {
1723       htd.setConfiguration(a.getName(), a.getValue());
1724     }
1725     return htd;
1726   }
1727 
1728   /**
1729    * Getter for accessing the configuration value by key
1730    */
1731   public String getConfigurationValue(String key) {
1732     return configuration.get(key);
1733   }
1734 
1735   /**
1736    * Getter for fetching an unmodifiable {@link #configuration} map.
1737    */
1738   public Map<String, String> getConfiguration() {
1739     // Unmodifiable view over the shared map, not a deep copy
1740     return Collections.unmodifiableMap(configuration);
1741   }
1742 
1743   /**
1744    * Setter for storing a configuration setting in {@link #configuration} map.
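        * <p>For example (placeholder key shown):
        * <pre>
        * htd.setConfiguration("hbase.something.or.other", "true");
        * htd.setConfiguration("hbase.something.or.other", null); // removes the setting
        * </pre>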
1745    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
1746    * @param value String value. If null, removes the setting.
1747    */
1748   public HTableDescriptor setConfiguration(String key, String value) {
1749     if (value == null) {
1750       removeConfiguration(key);
1751     } else {
1752       configuration.put(key, value);
1753     }
1754     return this;
1755   }
1756 
1757   /**
1758    * Remove a config setting represented by the key from the {@link #configuration} map
1759    */
1760   public void removeConfiguration(final String key) {
1761     configuration.remove(key);
1762   }
1763 
1764   public static HTableDescriptor metaTableDescriptor(final Configuration conf)
1765       throws IOException {
1766     HTableDescriptor metaDescriptor = new HTableDescriptor(
1767       TableName.META_TABLE_NAME,
1768       new HColumnDescriptor[] {
1769         new HColumnDescriptor(HConstants.CATALOG_FAMILY)
1770           .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
1771             HConstants.DEFAULT_HBASE_META_VERSIONS))
1772           .setInMemory(true)
1773           .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
1774             HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
1775           .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
1776           // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
1777           .setBloomFilterType(BloomType.NONE)
1778           .setCacheDataInL1(true)
1779          });
1780     metaDescriptor.addCoprocessor(
1781       "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
1782       null, Coprocessor.PRIORITY_SYSTEM, null);
1783     return metaDescriptor;
1784   }
1785 
1786 }