/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

import com.google.common.annotations.VisibleForTesting;

/**
 * Writes HFiles. Passed Cells must arrive in order.
 * Writes current time as the sequence id for the file. Sets the major compacted
 * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll
 * all HFiles being written.
 * <p>
 * Using this class as part of a MapReduce job is best done
 * using {@link #configureIncrementalLoad(Job, Table, RegionLocator)}.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HFileOutputFormat2
    extends FileOutputFormat<ImmutableBytesWritable, Cell> {
  static Log LOG = LogFactory.getLog(HFileOutputFormat2.class);

  // The following constants are private since these are used by
  // HFileOutputFormat2 to internally transfer data between job setup and
  // reducer run using conf.
  // These should not be changed by the client.
  private static final String COMPRESSION_FAMILIES_CONF_KEY =
      "hbase.hfileoutputformat.families.compression";
  private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =
      "hbase.hfileoutputformat.families.bloomtype";
  private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.blocksize";
  private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";

  // This constant is public since the client can modify this when setting
  // up their conf object and thus refer to this symbol.
  // It is present for backwards compatibility reasons. Use it only to
  // override the auto-detection of datablock encoding.
  public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
      "hbase.mapreduce.hfileoutputformat.datablock.encoding";
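
  // Illustrative sketch (added for exposition; not part of the HBase source):
  // a client that wants one data block encoding for every written HFile,
  // instead of the per-family auto-detection, sets the public override key
  // above before submitting the job. FAST_DIFF is just one example constant.
  static void exampleOverrideDataBlockEncoding(Configuration conf) {
    // Any DataBlockEncoding enum name works; an unknown name fails later in
    // createRecordWriter() when DataBlockEncoding.valueOf() parses it.
    conf.set(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, DataBlockEncoding.FAST_DIFF.name());
  }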

  @Override
  public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(
      final TaskAttemptContext context) throws IOException, InterruptedException {
    return createRecordWriter(context);
  }

  static <V extends Cell> RecordWriter<ImmutableBytesWritable, V>
      createRecordWriter(final TaskAttemptContext context)
          throws IOException {

    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompressionStr = conf.get("hfile.compression",
        Compression.Algorithm.NONE.getName());
    final Algorithm defaultCompression = AbstractHFileWriter
        .compressionByName(defaultCompressionStr);
    final boolean compactionExclude = conf.getBoolean(
        "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);

    // create a map from column family to the compression algorithm
    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap
        = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding;
    if (dataBlockEncodingStr != null) {
      overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } else {
      overriddenEncoding = null;
    }

    return new RecordWriter<ImmutableBytesWritable, V>() {
      // Map of families to writers and how much has been output on the writer.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
      private boolean rollRequested = false;

      @Override
      public void write(ImmutableBytesWritable row, V cell)
          throws IOException {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

        // null input == user explicitly wants to flush
        if (row == null && kv == null) {
          rollWriters();
          return;
        }

        byte [] rowKey = CellUtil.cloneRow(kv);
        long length = kv.getLength();
        byte [] family = CellUtil.cloneFamily(kv);
        WriterLength wl = this.writers.get(family);

        // If this is a new column family, create its output directory
        if (wl == null) {
          fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
        }

        // If any of the HFiles for the column families has reached
        // maxsize, we need to roll all the writers
        if (wl != null && wl.written + length >= maxsize) {
          this.rollRequested = true;
        }

        // This can only happen once a row is finished though
        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
          rollWriters();
        }

        // create a new HFile writer, if necessary
        if (wl == null || wl.writer == null) {
          wl = getNewWriter(family, conf);
        }

        // we now have the proper HFile writer. full steam ahead
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;

        // Copy the row so we can detect when a row transition happens.
        this.previousRow = rowKey;
      }

      private void rollWriters() throws IOException {
        for (WriterLength wl : this.writers.values()) {
          if (wl.writer != null) {
            LOG.info("Writer=" + wl.writer.getPath() +
                ((wl.written == 0)? "": ", wrote=" + wl.written));
            close(wl.writer);
          }
          wl.writer = null;
          wl.written = 0;
        }
        this.rollRequested = false;
      }

      /* Create a new StoreFile.Writer.
       * @param family
       * @return A WriterLength, containing a new StoreFile.Writer.
       * @throws IOException
       */
      @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",
          justification="Not important")
      private WriterLength getNewWriter(byte[] family, Configuration conf)
          throws IOException {
        WriterLength wl = new WriterLength();
        Path familydir = new Path(outputdir, Bytes.toString(family));
        Algorithm compression = compressionMap.get(family);
        compression = compression == null ? defaultCompression : compression;
        BloomType bloomType = bloomTypeMap.get(family);
        bloomType = bloomType == null ? BloomType.NONE : bloomType;
        Integer blockSize = blockSizeMap.get(family);
        blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
        DataBlockEncoding encoding = overriddenEncoding;
        encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;
        encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
        Configuration tempConf = new Configuration(conf);
        tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
        HFileContextBuilder contextBuilder = new HFileContextBuilder()
            .withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(blockSize);

        if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
          contextBuilder.withIncludesTags(true);
        }

        contextBuilder.withDataBlockEncoding(encoding);
        HFileContext hFileContext = contextBuilder.build();

        wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
            .withOutputDir(familydir).withBloomType(bloomType)
            .withComparator(KeyValue.COMPARATOR)
            .withFileContext(hFileContext).build();

        this.writers.put(family, wl);
        return wl;
      }

      private void close(final StoreFile.Writer w) throws IOException {
        if (w != null) {
          w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
              Bytes.toBytes(System.currentTimeMillis()));
          w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
              Bytes.toBytes(context.getTaskAttemptID().toString()));
          w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
              Bytes.toBytes(true));
          w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
              Bytes.toBytes(compactionExclude));
          w.appendTrackedTimestampsToMetadata();
          w.close();
        }
      }

      @Override
      public void close(TaskAttemptContext c)
          throws IOException, InterruptedException {
        for (WriterLength wl: this.writers.values()) {
          close(wl.writer);
        }
      }
    };
  }
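
  // Illustrative sketch (added for exposition; not part of the HBase source):
  // per the class Javadoc, write(null, null) forcibly rolls all HFiles that a
  // RecordWriter currently has open. A caller could use it like this:
  static <V extends Cell> void exampleForceRoll(
      RecordWriter<ImmutableBytesWritable, V> writer)
      throws IOException, InterruptedException {
    // Passing null for both key and value is the documented flush signal;
    // the next write(...) then opens a fresh HFile per column family.
    writer.write(null, null);
  }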

  /*
   * Data structure to hold a Writer and amount of data written on it.
   */
  static class WriterLength {
    long written = 0;
    StoreFile.Writer writer = null;
  }

  /**
   * Return the start keys of all of the regions in this table,
   * as a list of ImmutableBytesWritable.
   */
  private static List<ImmutableBytesWritable> getRegionStartKeys(RegionLocator table)
      throws IOException {
    byte[][] byteKeys = table.getStartKeys();
    ArrayList<ImmutableBytesWritable> ret =
      new ArrayList<ImmutableBytesWritable>(byteKeys.length);
    for (byte[] byteKey : byteKeys) {
      ret.add(new ImmutableBytesWritable(byteKey));
    }
    return ret;
  }

  /**
   * Write out a {@link SequenceFile} that can be read by
   * {@link TotalOrderPartitioner} that contains the split points in startKeys.
   */
  @SuppressWarnings("deprecation")
  private static void writePartitions(Configuration conf, Path partitionsPath,
      List<ImmutableBytesWritable> startKeys) throws IOException {
    LOG.info("Writing partition information to " + partitionsPath);
    if (startKeys.isEmpty()) {
      throw new IllegalArgumentException("No regions passed");
    }

    // We're generating a list of split points, and we don't ever
    // have keys < the first region (which has an empty start key)
    // so we need to remove it. Otherwise we would end up with an
    // empty reducer with index 0
    TreeSet<ImmutableBytesWritable> sorted =
      new TreeSet<ImmutableBytesWritable>(startKeys);

    ImmutableBytesWritable first = sorted.first();
    if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
      throw new IllegalArgumentException(
          "First region of table should have empty start key. Instead has: "
          + Bytes.toStringBinary(first.get()));
    }
    sorted.remove(first);

    // Write the actual file
    FileSystem fs = partitionsPath.getFileSystem(conf);
    SequenceFile.Writer writer = SequenceFile.createWriter(
      fs, conf, partitionsPath, ImmutableBytesWritable.class,
      NullWritable.class);

    try {
      for (ImmutableBytesWritable startKey : sorted) {
        writer.append(startKey, NullWritable.get());
      }
    } finally {
      writer.close();
    }
  }
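
  // Worked example (added for exposition): for a table with three regions
  // covering ("", "b"), ("b", "m") and ("m", ""), getRegionStartKeys() returns
  // ["", "b", "m"]. writePartitions() drops the leading empty key and writes
  // ["b", "m"] to the SequenceFile, so TotalOrderPartitioner sends rows < "b"
  // to reducer 0, rows in ["b", "m") to reducer 1, and rows >= "m" to
  // reducer 2: one reducer, and thus one set of HFiles, per region.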

  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
   * <ul>
   *   <li>Inspects the table to configure a total order partitioner</li>
   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
   *     PutSortReducer)</li>
   * </ul>
   * The user should be sure to set the map output value class to either KeyValue or Put before
   * running this function.
   *
   * @deprecated Use {@link #configureIncrementalLoad(Job, Table, RegionLocator)} instead.
   */
  @Deprecated
  public static void configureIncrementalLoad(Job job, HTable table)
      throws IOException {
    configureIncrementalLoad(job, table.getTableDescriptor(), table.getRegionLocator());
  }

  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
   * <ul>
   *   <li>Inspects the table to configure a total order partitioner</li>
   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
   *     PutSortReducer)</li>
   * </ul>
   * The user should be sure to set the map output value class to either KeyValue or Put before
   * running this function.
   */
  public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator)
      throws IOException {
    configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
  }
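
  // Illustrative sketch (added for exposition; not part of the HBase source):
  // a minimal driver wiring a bulk-load job through the method above. The
  // mapper class and paths are caller-supplied; table access goes through a
  // Connection as usual. Fully-qualified names are used for types this file
  // does not import.
  static void exampleBulkLoadDriver(Configuration conf, String tableName,
      Class<? extends org.apache.hadoop.mapreduce.Mapper> mapperClass,
      Path inputDir, Path hfileOutputDir) throws Exception {
    Job job = Job.getInstance(conf, "bulkload-example");
    job.setJarByClass(mapperClass);
    job.setMapperClass(mapperClass);
    // The map output value class drives the reducer chosen by
    // configureIncrementalLoad(): Put, KeyValue or Text.
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, inputDir);
    FileOutputFormat.setOutputPath(job, hfileOutputDir);
    try (org.apache.hadoop.hbase.client.Connection conn =
             org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(org.apache.hadoop.hbase.TableName.valueOf(tableName));
         RegionLocator locator =
             conn.getRegionLocator(org.apache.hadoop.hbase.TableName.valueOf(tableName))) {
      configureIncrementalLoad(job, table, locator);
      job.waitForCompletion(true);
    }
    // The HFiles left under hfileOutputDir are then moved into the table's
    // regions by the separate bulk-load tool (LoadIncrementalHFiles).
  }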

  /**
   * Configure a MapReduce Job to perform an incremental load into the given
   * table. This
   * <ul>
   *   <li>Inspects the table to configure a total order partitioner</li>
   *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
   *   <li>Sets the number of reduce tasks to match the current number of regions</li>
   *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
   *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
   *     PutSortReducer)</li>
   * </ul>
   * The user should be sure to set the map output value class to either KeyValue or Put before
   * running this function.
   */
  public static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,
      RegionLocator regionLocator) throws IOException {
    configureIncrementalLoad(job, tableDescriptor, regionLocator, HFileOutputFormat2.class);
  }

  static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,
      RegionLocator regionLocator, Class<? extends OutputFormat<?, ?>> cls) throws IOException,
      UnsupportedEncodingException {
    Configuration conf = job.getConfiguration();
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(cls);

    // Based on the configured map output class, set the correct reducer to properly
    // sort the incoming values.
    // TODO it would be nice to pick one or the other of these formats.
    if (KeyValue.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(KeyValueSortReducer.class);
    } else if (Put.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(PutSortReducer.class);
    } else if (Text.class.equals(job.getMapOutputValueClass())) {
      job.setReducerClass(TextSortReducer.class);
    } else {
      LOG.warn("Unknown map output value type: " + job.getMapOutputValueClass());
    }

    conf.setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());

    // Use the table's region boundaries as split points for the TotalOrderPartitioner.
    LOG.info("Looking up current regions for table " + tableDescriptor.getTableName());
    List<ImmutableBytesWritable> startKeys = getRegionStartKeys(regionLocator);
    LOG.info("Configuring " + startKeys.size() + " reduce partitions " +
        "to match current region count");
    job.setNumReduceTasks(startKeys.size());

    configurePartitioner(job, startKeys);
    // Set compression algorithms based on column families
    configureCompression(conf, tableDescriptor);
    configureBloomType(tableDescriptor, conf);
    configureBlockSize(tableDescriptor, conf);
    configureDataBlockEncoding(tableDescriptor, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + regionLocator.getName() + " output configured.");
  }

  public static void configureIncrementalLoadMap(Job job, Table table) throws IOException {
    Configuration conf = job.getConfiguration();

    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(KeyValue.class);
    job.setOutputFormatClass(HFileOutputFormat2.class);

    // Set compression algorithms based on column families
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    configureCompression(conf, tableDescriptor);
    configureBloomType(tableDescriptor, conf);
    configureBlockSize(tableDescriptor, conf);
    configureDataBlockEncoding(tableDescriptor, conf);

    TableMapReduceUtil.addDependencyJars(job);
    TableMapReduceUtil.initCredentials(job);
    LOG.info("Incremental table " + table.getName() + " output configured.");
  }

  /**
   * Runs inside the task to deserialize column family to compression algorithm
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured compression algorithm
   */
  @VisibleForTesting
  static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap =
        new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
      compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
  }

  /**
   * Runs inside the task to deserialize column family to bloom filter type
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured bloom filter type
   */
  @VisibleForTesting
  static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        BLOOM_TYPE_FAMILIES_CONF_KEY);
    Map<byte[], BloomType> bloomTypeMap =
        new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      BloomType bloomType = BloomType.valueOf(e.getValue());
      bloomTypeMap.put(e.getKey(), bloomType);
    }
    return bloomTypeMap;
  }

  /**
   * Runs inside the task to deserialize column family to block size
   * map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured block size
   */
  @VisibleForTesting
  static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        BLOCK_SIZE_FAMILIES_CONF_KEY);
    Map<byte[], Integer> blockSizeMap =
        new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      Integer blockSize = Integer.parseInt(e.getValue());
      blockSizeMap.put(e.getKey(), blockSize);
    }
    return blockSizeMap;
  }

  /**
   * Runs inside the task to deserialize column family to data block encoding
   * type map from the configuration.
   *
   * @param conf to read the serialized values from
   * @return a map from column family to the configured data block encoding
   *         for the family
   */
  @VisibleForTesting
  static Map<byte[], DataBlockEncoding> createFamilyDataBlockEncodingMap(
      Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        DATABLOCK_ENCODING_FAMILIES_CONF_KEY);
    Map<byte[], DataBlockEncoding> encoderMap =
        new TreeMap<byte[], DataBlockEncoding>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      encoderMap.put(e.getKey(), DataBlockEncoding.valueOf(e.getValue()));
    }
    return encoderMap;
  }

  /**
   * Runs inside the task to deserialize column family to given conf value map.
   *
   * @param conf to read the serialized values from
   * @param confName conf key to read from the configuration
   * @return a map of column family to the given configuration value
   */
  private static Map<byte[], String> createFamilyConfValueMap(
      Configuration conf, String confName) {
    Map<byte[], String> confValMap = new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
    String confVal = conf.get(confName, "");
    for (String familyConf : confVal.split("&")) {
      String[] familySplit = familyConf.split("=");
      if (familySplit.length != 2) {
        continue;
      }
      try {
        confValMap.put(URLDecoder.decode(familySplit[0], "UTF-8").getBytes("UTF-8"),
            URLDecoder.decode(familySplit[1], "UTF-8"));
      } catch (UnsupportedEncodingException e) {
        // will not happen with UTF-8 encoding
        throw new AssertionError(e);
      }
    }
    return confValMap;
  }
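
  // Illustrative sketch (added for exposition; not part of the HBase source):
  // all the per-family maps travel through the conf as one URL-encoded
  // "family1=value1&family2=value2" string. A round trip under a hypothetical
  // conf key looks like this:
  static void exampleFamilyConfRoundTrip(Configuration conf) throws IOException {
    // Serialize, as configureCompression() and friends do at job-setup time.
    String serialized =
        URLEncoder.encode("cf1", "UTF-8") + "=" + URLEncoder.encode("gz", "UTF-8") + "&"
            + URLEncoder.encode("cf2", "UTF-8") + "=" + URLEncoder.encode("none", "UTF-8");
    conf.set("example.families.conf.key", serialized);  // hypothetical key

    // Deserialize, as createFamilyConfValueMap() does inside the task.
    Map<byte[], String> map = createFamilyConfValueMap(conf, "example.families.conf.key");
    assert "gz".equals(map.get(Bytes.toBytes("cf1")));
  }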

  /**
   * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
   * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
   */
  static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
      throws IOException {
    Configuration conf = job.getConfiguration();
    // create the partitions file
    FileSystem fs = FileSystem.get(conf);
    Path partitionsPath = new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" + UUID.randomUUID());
    // makeQualified() returns the qualified path; keep the result.
    partitionsPath = fs.makeQualified(partitionsPath);
    writePartitions(conf, partitionsPath, splitPoints);
    fs.deleteOnExit(partitionsPath);

    // configure job to use it
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
  }

  /**
   * Serialize column family to compression algorithm map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param conf to persist serialized values into
   * @param tableDescriptor to read the properties from
   * @throws UnsupportedEncodingException
   *           on failure to URL-encode column family names or values
   */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
  @VisibleForTesting
  static void configureCompression(Configuration conf, HTableDescriptor tableDescriptor)
      throws UnsupportedEncodingException {
    StringBuilder compressionConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        compressionConfigValue.append('&');
      }
      compressionConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
      compressionConfigValue.append('=');
      compressionConfigValue.append(URLEncoder.encode(
        familyDescriptor.getCompression().getName(), "UTF-8"));
    }
    conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
  }
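
  // Worked example (added for exposition): for a table with family "cf1" using
  // GZ compression and family "cf2" using none, the method above stores the
  // string "cf1=gz&cf2=none" under "hbase.hfileoutputformat.families.compression";
  // the lowercase names come from Compression.Algorithm.getName().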

  /**
   * Serialize column family to block size map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param tableDescriptor to read the properties from
   * @param conf to persist serialized values into
   * @throws UnsupportedEncodingException
   *           on failure to URL-encode column family names or values
   */
  @VisibleForTesting
  static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
      throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        blockSizeConfigValue.append('&');
      }
      blockSizeConfigValue.append(URLEncoder.encode(
          familyDescriptor.getNameAsString(), "UTF-8"));
      blockSizeConfigValue.append('=');
      blockSizeConfigValue.append(URLEncoder.encode(
          String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
  }

  /**
   * Serialize column family to bloom type map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param tableDescriptor to read the properties from
   * @param conf to persist serialized values into
   * @throws UnsupportedEncodingException
   *           on failure to URL-encode column family names or values
   */
  @VisibleForTesting
  static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf)
      throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    StringBuilder bloomTypeConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        bloomTypeConfigValue.append('&');
      }
      bloomTypeConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
      bloomTypeConfigValue.append('=');
      // Fall back to the default bloom filter if the descriptor has none set;
      // the null check must run before toString() to be of any use.
      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
      String bloomType = bloomFilterType == null
          ? HColumnDescriptor.DEFAULT_BLOOMFILTER : bloomFilterType.toString();
      bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
    }
    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
  }

  /**
   * Serialize column family to data block encoding map to configuration.
   * Invoked while configuring the MR job for incremental load.
   *
   * @param tableDescriptor to read the properties from
   * @param conf to persist serialized values into
   * @throws UnsupportedEncodingException
   *           on failure to URL-encode column family names or values
   */
  @VisibleForTesting
  static void configureDataBlockEncoding(HTableDescriptor tableDescriptor,
      Configuration conf) throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
      // could happen with mock table instance
      return;
    }
    StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
      if (i++ > 0) {
        dataBlockEncodingConfigValue.append('&');
      }
      dataBlockEncodingConfigValue.append(
          URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
      dataBlockEncodingConfigValue.append('=');
      DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
      if (encoding == null) {
        encoding = DataBlockEncoding.NONE;
      }
      dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
          "UTF-8"));
    }
    conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
        dataBlockEncodingConfigValue.toString());
  }
}