/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.io.hfile;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

/**
 * Testing writing a version 3 {@link HFile}. This is a low-level test written
 * during the development of {@link HFileWriterV3}.
 */
@RunWith(Parameterized.class)
@Category(SmallTests.class)
public class TestHFileWriterV3 {

  private static final Log LOG = LogFactory.getLog(TestHFileWriterV3.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private Configuration conf;
  private FileSystem fs;
  private boolean useTags;

  public TestHFileWriterV3(boolean useTags) {
    this.useTags = useTags;
  }
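
  // HBaseTestingUtility.BOOLEAN_PARAMETERIZED supplies a { false } and a
  // { true } parameter set, so each test in this class runs once without and
  // once with cell tags.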
  @Parameters
  public static Collection<Object[]> parameters() {
    return HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
  }

  @Before
  public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    fs = FileSystem.get(conf);
  }

  @Test
  public void testHFileFormatV3() throws IOException {
    testHFileFormatV3Internals(useTags);
  }

  private void testHFileFormatV3Internals(boolean useTags) throws IOException {
    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testHFileFormatV3");
    final Compression.Algorithm compressAlgo = Compression.Algorithm.GZ;
    final int entryCount = 10000;
    writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, false, useTags);
  }

  @Test
  public void testMidKeyInHFile() throws IOException {
    testMidKeyInHFileInternals(useTags);
  }

  private void testMidKeyInHFileInternals(boolean useTags) throws IOException {
    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile");
    Compression.Algorithm compressAlgo = Compression.Algorithm.NONE;
    int entryCount = 50000;
    writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true, useTags);
  }

  private void writeDataAndReadFromHFile(Path hfilePath,
      Algorithm compressAlgo, int entryCount, boolean findMidKey, boolean useTags)
      throws IOException {
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(4096)
        .withIncludesTags(useTags)
        .withCompression(compressAlgo)
        .build();
    HFileWriterV3 writer = (HFileWriterV3)
        new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath)
            .withFileContext(context)
            .withComparator(KeyValue.COMPARATOR)
            .create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
      byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);

      // A random-length random value.
      byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
      KeyValue keyValue = null;
      if (useTags) {
        // Attach between one and four random 16-byte tags to this cell.
        ArrayList<Tag> tags = new ArrayList<Tag>();
        for (int j = 0; j < 1 + rand.nextInt(4); j++) {
          byte[] tagBytes = new byte[16];
          rand.nextBytes(tagBytes);
          tags.add(new Tag((byte) 1, tagBytes));
        }
        keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP,
            valueBytes, tags);
      } else {
        keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP,
            valueBytes);
      }
      writer.append(keyValue);
      keyValues.add(keyValue);
    }
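
    // randomOrderedKey is named for producing keys whose byte order follows
    // the index i, so the appends above should satisfy the writer's
    // sorted-key requirement.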

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));
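
    // Closing the writer flushes the remaining data and writes the
    // load-on-open section: the block indexes, the file info, and the
    // fixed trailer.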
    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(3, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());
    HFileContext meta = new HFileContextBuilder()
        .withCompression(compressAlgo)
        .withIncludesMvcc(false)
        .withIncludesTags(useTags)
        .withHBaseCheckSum(true)
        .build();
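    // The reader-side context must mirror the writer's settings (compression,
    // tags); HBase-level checksums are always present in version 3 files.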
    HFileBlock.FSReader blockReader =
        new HFileBlock.FSReaderImpl(fsdis, fileSize, meta);
    // The comparator class name has been stored in the trailer since version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
        new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader =
        new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
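    // The meta block index is always a single-level index keyed by raw block
    // names, hence the RAW_COMPARATOR and a tree depth of one.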

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(
        trailer.getLoadOnOpenDataOffset(),
        fileSize - trailer.getTrailerSize());
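    // The load-on-open section sits between the last data/meta block and the
    // trailer; it is read back here in write order: root data index, meta
    // index, then file info.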
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX), trailer.getDataIndexCount());

    if (findMidKey) {
      byte[] midkey = dataBlockIndexReader.midkey();
      assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(
        blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
        trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV3.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null &&
        Bytes.toInt(keyValueFormatVersion) > 0;
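    // A KEY_VALUE_VERSION greater than zero in the file info indicates that
    // every cell is followed by a variable-length memstore timestamp.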

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false, false)
          .unpack(context, blockReader);
      assertEquals(BlockType.DATA, block.getBlockType());
      ByteBuffer buf = block.getBufferWithoutHeader();
      int keyLen = -1;
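      // Each cell in the block is laid out as: key length (int), value length
      // (int), key bytes, value bytes, then, when tags are enabled, a two-byte
      // tag-data length followed by the tag bytes, and finally an optional
      // memstore timestamp varint.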
      while (buf.hasRemaining()) {
        keyLen = buf.getInt();
        int valueLen = buf.getInt();

        byte[] key = new byte[keyLen];
        buf.get(key);

        byte[] value = new byte[valueLen];
        buf.get(value);
        byte[] tagValue = null;
        if (useTags) {
          // The tag-data length is a two-byte big-endian value.
          int tagLen = ((buf.get() & 0xff) << 8) ^ (buf.get() & 0xff);
          tagValue = new byte[tagLen];
          buf.get(tagValue);
        }

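        // The memstore timestamp is a vint, which ByteBuffer cannot decode
        // directly; read it through a DataInputStream over the backing array
        // and then advance the buffer position past it by hand.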
        if (includeMemstoreTS) {
          ByteArrayInputStream byteInput = new ByteArrayInputStream(buf.array(),
              buf.arrayOffset() + buf.position(), buf.remaining());
          DataInputStream dataInput = new DataInputStream(byteInput);

          memstoreTS = WritableUtils.readVLong(dataInput);
          buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
        }

        // A brute-force check to see that all keys and values are correct.
        assertEquals(0, Bytes.compareTo(key, keyValues.get(entriesRead).getKey()));
        assertEquals(0, Bytes.compareTo(value, keyValues.get(entriesRead).getValue()));
        if (useTags) {
          assertNotNull(tagValue);
          KeyValue tkv = keyValues.get(entriesRead);
          assertEquals(tagValue.length, tkv.getTagsLength());
          assertEquals(0, Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(),
              tkv.getTagsOffset(), tkv.getTagsLength()));
        }
        ++entriesRead;
      }
      ++blocksRead;
      curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead="
        + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.

    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
      LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " +
          trailer.getLoadOnOpenDataOffset());
      HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false, false)
          .unpack(context, blockReader);
      assertEquals(BlockType.META, block.getBlockType());
      Text t = new Text();
      ByteBuffer buf = block.getBufferWithoutHeader();
      if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
        throw new IOException("Failed to deserialize block " + block + " into a "
            + t.getClass().getSimpleName());
      }
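      // Meta blocks come back sorted by block name (CAPITAL_OF_FRANCE,
      // CAPITAL_OF_RUSSIA, CAPITAL_OF_USA), not in the order they were
      // appended.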
      Text expectedText = metaCounter == 0 ? new Text("Paris")
          : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C.");
      assertEquals(expectedText, t);
      LOG.info("Read meta block data: " + t);
      ++metaCounter;
      curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
  }

}