/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util.hbck;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.experimental.categories.Category;

/**
 * This testing base class creates a minicluster and a test table, and shuts
 * the cluster down afterwards. It also provides methods to wipe out meta and
 * to inject errors into meta and the file system.
 *
 * Tests should generally break things, then attempt to rebuild the meta table
 * offline, then restart hbase, and finally perform checks.
 *
 * NOTE: This is a slow set of tests (~30s each) which needs to run on a
 * relatively beefy machine. It seems necessary to run each test in a new JVM,
 * since minicluster startups and teardowns appear to leak file handles and
 * eventually cause out-of-file-handle exceptions.
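 *
 * A typical subclass test might look like the sketch below. This is a
 * hypothetical example: the class and method names are illustrative only,
 * and the offline rebuild step is assumed to be done with a tool such as
 * OfflineMetaRepair.
 *
 * <pre>
 * &#64;Category(LargeTests.class)
 * public class TestMyMetaRebuild extends OfflineMetaRebuildTestCore {
 *   &#64;Test
 *   public void testRebuildAfterDamage() throws Exception {
 *     // Break things: drop a region from meta and hdfs, then wipe out meta.
 *     deleteRegion(conf, htbl, Bytes.toBytes("B"), Bytes.toBytes("C"));
 *     wipeOutMeta();
 *     // Rebuild meta offline, restart the cluster, then verify with
 *     // scanMeta() and tableRowCount(conf, table).
 *   }
 * }
 * </pre>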
 */
@Category(LargeTests.class)
public class OfflineMetaRebuildTestCore {
  protected final static Log LOG = LogFactory
      .getLog(OfflineMetaRebuildTestCore.class);
  protected HBaseTestingUtility TEST_UTIL;
  protected Configuration conf;
  private final static byte[] FAM = Bytes.toBytes("fam");

  // for the instance, reset every test run
  protected Table htbl;
  protected final static byte[][] splits = new byte[][] { Bytes.toBytes("A"),
      Bytes.toBytes("B"), Bytes.toBytes("C") };

  private final static String TABLE_BASE = "tableMetaRebuild";
  private static int tableIdx = 0;
  protected TableName table = TableName.valueOf("tableMetaRebuild");
  protected Connection connection;

  @Before
  public void setUpBefore() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    // Note: the HDFS property name really is spelled "xcievers".
    TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xcievers", 9192);
    TEST_UTIL.startMiniCluster(3);
    conf = TEST_UTIL.getConfiguration();
    this.connection = ConnectionFactory.createConnection(conf);
    assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);

    // set up the table
    table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
    tableIdx++;
    htbl = setupTable(table);
    populateTable(htbl);
    // 3 split keys yield 4 regions, plus the namespace table's region in meta.
    assertEquals(5, scanMeta());
    LOG.info("Table " + table + " has " + tableRowCount(conf, table)
        + " entries.");
    assertEquals(16, tableRowCount(conf, table));
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    assertEquals(1, TEST_UTIL.getHBaseAdmin().listTables().length);
  }

  @After
  public void tearDownAfter() throws Exception {
    if (this.htbl != null) {
      this.htbl.close();
      this.htbl = null;
    }
    if (this.connection != null) {
      this.connection.close();
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Sets up a clean table before we start mucking with it.
   */
  private Table setupTable(TableName tablename) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(tablename);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CFs it doesn't get checked
    TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
    return this.connection.getTable(tablename);
  }

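  /**
   * Logs the row keys of all hbase:meta entries for the given table.
   */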
  private void dumpMeta(HTableDescriptor htd) throws IOException {
    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
    for (byte[] row : metaRows) {
      LOG.info(Bytes.toString(row));
    }
  }
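  /**
   * Writes 16 rows into the given table: every two-letter combination of
   * 'A'..'D' as the row key, with a single empty-qualifier cell per row.
   */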
  private void populateTable(Table tbl) throws IOException {
    byte[] values = { 'A', 'B', 'C', 'D' };
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values.length; j++) {
        Put put = new Put(new byte[] { values[i], values[j] });
        put.add(FAM, new byte[] {}, new byte[] { values[i], values[j] });
        puts.add(put);
      }
    }
    tbl.put(puts);
  }

  /**
   * Deletes the specified table in preparation for the next test.
   *
   * @param tablename name of the table to disable and delete
   */
  void deleteTable(HBaseAdmin admin, String tablename) throws IOException {
    try {
      byte[] tbytes = Bytes.toBytes(tablename);
      admin.disableTable(tbytes);
      admin.deleteTable(tbytes);
    } catch (Exception e) {
      // Ignore failures: the table may not exist or may already be disabled.
    }
  }
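  /**
   * Removes the region of the given table whose boundaries exactly match
   * [startKey, endKey): the region is unassigned, its directory is deleted
   * from hdfs, and its row is deleted from hbase:meta.
   */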
  protected void deleteRegion(Configuration conf, final Table tbl,
      byte[] startKey, byte[] endKey) throws IOException {

    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);

    Map<HRegionInfo, ServerName> hris = ((HTable)tbl).getRegionLocations();
    for (Entry<HRegionInfo, ServerName> e : hris.entrySet()) {
      HRegionInfo hri = e.getKey();
      ServerName hsa = e.getValue();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = FSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()),
            hri.getEncodedName());
        fs.delete(p, true);

        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
  }
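  /**
   * Manually creates a region for the given table, writing its .regioninfo
   * file to hdfs and adding the region to hbase:meta.
   */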
  protected HRegionInfo createRegion(Configuration conf, final Table htbl,
      byte[] startKey, byte[] endKey) throws IOException {
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
        hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    try (FSDataOutputStream out = fs.create(riPath)) {
      out.write(hri.toDelimitedByteArray());
    }

    // add to meta.
    try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
      MetaTableAccessor.addRegionToMeta(meta, hri);
    }
    return hri;
  }
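  /**
   * Blows away hbase:meta by unassigning and deleting every non-system
   * region entry it contains.
   */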
  protected void wipeOutMeta() throws IOException {
    // Mess it up by blowing up meta.
    Admin admin = TEST_UTIL.getHBaseAdmin();
    try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME);
        ResultScanner scanner = meta.getScanner(new Scan())) {
      List<Delete> dels = new ArrayList<>();
      for (Result r : scanner) {
        HRegionInfo info = HRegionInfo.getHRegionInfo(r);
        if (info != null && !info.getTable().getNamespaceAsString()
            .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
          Delete d = new Delete(r.getRow());
          dels.add(d);
          admin.unassign(r.getRow(), true);
        }
      }
      meta.delete(dels);
    }
  }

  /**
   * Returns the number of rows in a given table. HBase must be up and the
   * table should be present; otherwise the scan blocks until the client
   * retry timeout expires.
   *
   * @return # of rows in the specified table
   */
  protected int tableRowCount(Configuration conf, TableName table)
      throws IOException {
    int count = 0;
    try (Table t = new HTable(conf, table);
        ResultScanner rst = t.getScanner(new Scan())) {
      for (@SuppressWarnings("unused") Result rt : rst) {
        count++;
      }
    }
    return count;
  }

  /**
   * Dumps hbase:meta table info, logging each row key.
   *
   * @return # of entries in meta.
   */
  protected int scanMeta() throws IOException {
    int count = 0;
    try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME);
        ResultScanner scanner = meta.getScanner(new Scan())) {
      LOG.info("Table: " + meta.getName());
      for (Result res : scanner) {
        LOG.info(Bytes.toString(res.getRow()));
        count++;
      }
    }
    return count;
  }

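  /**
   * Returns the descriptors of all user tables, using a fresh connection
   * built from the given configuration.
   */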
  protected HTableDescriptor[] getTables(final Configuration configuration) throws IOException {
    HTableDescriptor[] htbls = null;
    try (Connection connection = ConnectionFactory.createConnection(configuration)) {
      try (Admin admin = connection.getAdmin()) {
        htbls = admin.listTables();
      }
    }
    return htbls;
  }
}