View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.util;
21  
22  import java.io.IOException;
23  import java.util.ArrayList;
24  import java.util.List;
25  
26  import org.apache.commons.logging.Log;
27  import org.apache.commons.logging.LogFactory;
28  import org.apache.hadoop.fs.Path;
29  import org.apache.hadoop.hbase.Cell;
30  import org.apache.hadoop.hbase.CellUtil;
31  import org.apache.hadoop.hbase.HBaseTestCase;
32  import org.apache.hadoop.hbase.HBaseTestingUtility;
33  import org.apache.hadoop.hbase.HColumnDescriptor;
34  import org.apache.hadoop.hbase.HConstants;
35  import org.apache.hadoop.hbase.HRegionInfo;
36  import org.apache.hadoop.hbase.HTableDescriptor;
37  import org.apache.hadoop.hbase.client.Get;
38  import org.apache.hadoop.hbase.client.Put;
39  import org.apache.hadoop.hbase.client.Result;
40  import org.apache.hadoop.hbase.client.Scan;
41  import org.apache.hadoop.hbase.regionserver.HRegion;
42  import org.apache.hadoop.hbase.regionserver.InternalScanner;
43  import org.apache.hadoop.hbase.testclassification.LargeTests;
44  import org.apache.hadoop.hbase.wal.WAL;
45  import org.apache.hadoop.hbase.wal.WALFactory;
46  import org.apache.hadoop.hdfs.MiniDFSCluster;
47  import org.apache.hadoop.util.ToolRunner;
48  import org.junit.experimental.categories.Category;
49  
/** Test stand alone merge tool that can merge arbitrary regions */
@Category(LargeTests.class)
public class TestMergeTool extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestMergeTool.class);
  // Utility for spinning up the mini DFS cluster; created in setUp().
  HBaseTestingUtility TEST_UTIL;
//  static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
  // Single column family/qualifier used for every cell written by this test.
  static final byte [] FAMILY = Bytes.toBytes("contents");
  static final byte [] QUALIFIER = Bytes.toBytes("dc");

  // Five source regions built in setUp(): overlapping, adjacent, disjoint
  // and an everything-overlapping region with empty start/end keys.
  private final HRegionInfo[] sourceRegions = new HRegionInfo[5];
  // The opened HRegion for each entry of sourceRegions.
  private final HRegion[] regions = new HRegion[5];
  private HTableDescriptor desc;
  // rows[i] holds the row keys inserted into regions[i].
  private byte [][][] rows;
  private MiniDFSCluster dfsCluster = null;
  private WALFactory wals;
  /**
   * Builds the fixture for the merge-tool test: a table descriptor with one
   * family, five source regions with overlapping/adjacent/disjoint key
   * ranges, the row keys for each region, a two-node mini DFS cluster, the
   * META region and the five populated source regions.  If region creation
   * fails, the mini cluster is shut down before the exception is rethrown.
   */
  @Override
  public void setUp() throws Exception {
    // Set the timeout down else this test will take a while to complete.
    this.conf.setLong("hbase.zookeeper.recoverable.waittime", 10);
    // Make it so we try and connect to a zk that is not there (else we might
    // find a zk ensemble put up by another concurrent test and this will
    // mess up this test.  Choose unlikely port. Default test port is 21818.
    // Default zk port is 2181.
    this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 10001);

    this.conf.set("hbase.hstore.compactionThreshold", "2");

    // Create table description
    this.desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf("TestMergeTool"));
    this.desc.addFamily(new HColumnDescriptor(FAMILY));

    /*
     * Create the HRegionInfos for the regions.
     */
    // Region 0 will contain the key range [row_0200,row_0300)
    sourceRegions[0] = new HRegionInfo(this.desc.getTableName(),
        Bytes.toBytes("row_0200"),
      Bytes.toBytes("row_0300"));

    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
    // with Region 0
    sourceRegions[1] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0250"),
          Bytes.toBytes("row_0400"));

    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
    // to Region 0 or the region resulting from the merge of Regions 0 and 1
    sourceRegions[2] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0100"),
          Bytes.toBytes("row_0200"));

    // Region 3 will contain the key range [row_0500,row_0600) and is not
    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
    // of those regions
    sourceRegions[3] =
      new HRegionInfo(this.desc.getTableName(),
          Bytes.toBytes("row_0500"),
          Bytes.toBytes("row_0600"));

    // Region 4 will have empty start and end keys and overlaps all regions.
    sourceRegions[4] =
      new HRegionInfo(this.desc.getTableName(),
          HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY);

    /*
     * Now create some row keys.  Note the intentional duplicates within
     * rows[1..4]; each Put overwrites the previous cell for the same row,
     * so the later assertEquals(1, result.size()) checks still hold.
     */
    this.rows = new byte [5][][];
    this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350",
        "row_035" });
    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175",
        "row_0175", "row_0175"});
    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560",
        "row_0560", "row_0560", "row_0560"});
    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000",
        "row_1000", "row_1000", "row_1000", "row_1000" });

    // Start up dfs
    TEST_UTIL = new HBaseTestingUtility(conf);
    this.dfsCluster = TEST_UTIL.startMiniDFSCluster(2);
    this.fs = this.dfsCluster.getFileSystem();
    System.out.println("fs=" + this.fs);
    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
    TEST_UTIL.createRootDir();

    // Note: we must call super.setUp after starting the mini cluster or
    // we will end up with a local file system

    super.setUp();
    wals = new WALFactory(conf, null, "TestMergeTool");
    try {
      // Create meta region
      createMetaRegion();
      new FSTableDescriptors(conf, this.fs, this.testDir).createTableDescriptor(this.desc);
      /*
       * Create the regions we will merge
       */
      for (int i = 0; i < sourceRegions.length; i++) {
        regions[i] =
          HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf,
              this.desc);
        /*
         * Insert data: each cell's value is its own row key, which is what
         * verifyMerge() later asserts against.
         */
        for (int j = 0; j < rows[i].length; j++) {
          byte [] row = rows[i][j];
          Put put = new Put(row);
          put.add(FAMILY, QUALIFIER, row);
          regions[i].put(put);
        }
        HRegion.addRegionToMETA(meta, regions[i]);
      }
      // Close root and meta regions
      closeRootAndMeta();

    } catch (Exception e) {
      // Don't leak the mini cluster if fixture construction fails.
      TEST_UTIL.shutdownMiniCluster();
      throw e;
    }
  }
175 
176   @Override
177   public void tearDown() throws Exception {
178     super.tearDown();
179     for (int i = 0; i < sourceRegions.length; i++) {
180       HRegion r = regions[i];
181       if (r != null) {
182         HRegion.closeHRegion(r);
183       }
184     }
185     wals.close();
186     TEST_UTIL.shutdownMiniCluster();
187   }
188 
189   /*
190    * @param msg Message that describes this merge
191    * @param regionName1
192    * @param regionName2
193    * @param log Log to use merging.
194    * @param upperbound Verifying, how high up in this.rows to go.
195    * @return Merged region.
196    * @throws Exception
197    */
198   private HRegion mergeAndVerify(final String msg, final String regionName1,
199     final String regionName2, final WAL log, final int upperbound)
200   throws Exception {
201     Merge merger = new Merge(this.conf);
202     LOG.info(msg);
203     LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
204     int errCode = ToolRunner.run(this.conf, merger,
205       new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2}
206     );
207     assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
208     HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
209 
210     // Now verify that we can read all the rows from regions 0, 1
211     // in the new merged region.
212     HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
213     verifyMerge(merged, upperbound);
214     merged.close();
215     LOG.info("Verified " + msg);
216     return merged;
217   }
218 
219   private void verifyMerge(final HRegion merged, final int upperbound)
220   throws IOException {
221     //Test
222     Scan scan = new Scan();
223     scan.addFamily(FAMILY);
224     InternalScanner scanner = merged.getScanner(scan);
225     try {
226     List<Cell> testRes = null;
227       while (true) {
228         testRes = new ArrayList<Cell>();
229         boolean hasNext = scanner.next(testRes);
230         if (!hasNext) {
231           break;
232         }
233       }
234     } finally {
235       scanner.close();
236     }
237 
238     //!Test
239 
240     for (int i = 0; i < upperbound; i++) {
241       for (int j = 0; j < rows[i].length; j++) {
242         Get get = new Get(rows[i][j]);
243         get.addFamily(FAMILY);
244         Result result = merged.get(get);
245         assertEquals(1, result.size());
246         byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
247         assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
248         assertTrue(Bytes.equals(bytes, rows[i][j]));
249       }
250     }
251   }
252 
253   /**
254    * Test merge tool.
255    * @throws Exception
256    */
257   public void testMergeTool() throws Exception {
258     // First verify we can read the rows from the source regions and that they
259     // contain the right data.
260     for (int i = 0; i < regions.length; i++) {
261       for (int j = 0; j < rows[i].length; j++) {
262         Get get = new Get(rows[i][j]);
263         get.addFamily(FAMILY);
264         Result result = regions[i].get(get);
265         byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
266         assertNotNull(bytes);
267         assertTrue(Bytes.equals(bytes, rows[i][j]));
268       }
269       // Close the region and delete the log
270       HRegion.closeHRegion(regions[i]);
271     }
272     WAL log = wals.getWAL(new byte[]{});
273      // Merge Region 0 and Region 1
274     HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
275       this.sourceRegions[0].getRegionNameAsString(),
276       this.sourceRegions[1].getRegionNameAsString(), log, 2);
277 
278     // Merge the result of merging regions 0 and 1 with region 2
279     merged = mergeAndVerify("merging regions 0+1 and 2",
280       merged.getRegionInfo().getRegionNameAsString(),
281       this.sourceRegions[2].getRegionNameAsString(), log, 3);
282 
283     // Merge the result of merging regions 0, 1 and 2 with region 3
284     merged = mergeAndVerify("merging regions 0+1+2 and 3",
285       merged.getRegionInfo().getRegionNameAsString(),
286       this.sourceRegions[3].getRegionNameAsString(), log, 4);
287 
288     // Merge the result of merging regions 0, 1, 2 and 3 with region 4
289     merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
290       merged.getRegionInfo().getRegionNameAsString(),
291       this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
292   }
293 
294 }
295