1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.coprocessor;
21  
22  import static org.junit.Assert.assertArrayEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertNotNull;
25  import static org.junit.Assert.assertTrue;
26  
27  import java.io.IOException;
28  import java.lang.reflect.Method;
29  import java.util.ArrayList;
30  import java.util.List;
31  
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.fs.FileSystem;
36  import org.apache.hadoop.fs.Path;
37  import org.apache.hadoop.hbase.Cell;
38  import org.apache.hadoop.hbase.CellUtil;
39  import org.apache.hadoop.hbase.Coprocessor;
40  import org.apache.hadoop.hbase.HBaseTestingUtility;
41  import org.apache.hadoop.hbase.HColumnDescriptor;
42  import org.apache.hadoop.hbase.HRegionInfo;
43  import org.apache.hadoop.hbase.HTableDescriptor;
44  import org.apache.hadoop.hbase.KeyValue;
45  import org.apache.hadoop.hbase.testclassification.MediumTests;
46  import org.apache.hadoop.hbase.MiniHBaseCluster;
47  import org.apache.hadoop.hbase.ServerName;
48  import org.apache.hadoop.hbase.TableName;
49  import org.apache.hadoop.hbase.client.Admin;
50  import org.apache.hadoop.hbase.client.Append;
51  import org.apache.hadoop.hbase.client.Delete;
52  import org.apache.hadoop.hbase.client.Durability;
53  import org.apache.hadoop.hbase.client.Get;
54  import org.apache.hadoop.hbase.client.HTable;
55  import org.apache.hadoop.hbase.client.Increment;
56  import org.apache.hadoop.hbase.client.Put;
57  import org.apache.hadoop.hbase.client.Result;
58  import org.apache.hadoop.hbase.client.ResultScanner;
59  import org.apache.hadoop.hbase.client.RowMutations;
60  import org.apache.hadoop.hbase.client.Scan;
61  import org.apache.hadoop.hbase.client.Table;
62  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
63  import org.apache.hadoop.hbase.io.hfile.HFile;
64  import org.apache.hadoop.hbase.io.hfile.HFileContext;
65  import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
66  import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
67  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
68  import org.apache.hadoop.hbase.regionserver.HRegion;
69  import org.apache.hadoop.hbase.regionserver.InternalScanner;
70  import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext;
71  import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
72  import org.apache.hadoop.hbase.regionserver.ScanType;
73  import org.apache.hadoop.hbase.regionserver.ScannerContext;
74  import org.apache.hadoop.hbase.regionserver.Store;
75  import org.apache.hadoop.hbase.regionserver.StoreFile;
76  import org.apache.hadoop.hbase.util.Bytes;
77  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
78  import org.apache.hadoop.hbase.util.JVMClusterUtil;
79  import org.apache.hadoop.hbase.util.Threads;
80  import org.junit.AfterClass;
81  import org.junit.BeforeClass;
82  import org.junit.Ignore;
83  import org.junit.Test;
84  import org.junit.experimental.categories.Category;
85  
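    /**
     * Exercises the RegionObserver coprocessor hooks by loading SimpleRegionObserver on every
     * region and asserting which upcalls fire for gets, puts, deletes, batch mutations, scans,
     * increments, appends, checkAndPut/checkAndDelete, compactions, bulk loads and WAL restores.
     */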
86  @Category(MediumTests.class)
87  public class TestRegionObserverInterface {
88    static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class);
89  
90    public static final TableName TEST_TABLE = TableName.valueOf("TestTable");
91    public final static byte[] A = Bytes.toBytes("a");
92    public final static byte[] B = Bytes.toBytes("b");
93    public final static byte[] C = Bytes.toBytes("c");
94    public final static byte[] ROW = Bytes.toBytes("testrow");
95  
96    private static HBaseTestingUtility util = new HBaseTestingUtility();
97    private static MiniHBaseCluster cluster = null;
98  
99    @BeforeClass
100   public static void setupBeforeClass() throws Exception {
101     // Set the configuration to indicate which coprocessors should be loaded.
102     Configuration conf = util.getConfiguration();
103     conf.setBoolean("hbase.master.distributed.log.replay", true);
104     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
105         "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver",
106         "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver$Legacy");
107 
108     util.startMiniCluster();
109     cluster = util.getMiniHBaseCluster();
110   }
111 
112   @AfterClass
113   public static void tearDownAfterClass() throws Exception {
114     util.shutdownMiniCluster();
115   }
116 
117   @Test (timeout=300000)
118   public void testRegionObserver() throws IOException {
119     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
120     // recreate table every time in order to reset the status of the
121     // coprocessor.
122     Table table = util.createTable(tableName, new byte[][] {A, B, C});
123     try {
124       verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
125           "hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation",
126           "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName,
127         new Boolean[] { false, false, false, false, false, false, false, false });
128 
129       Put put = new Put(ROW);
130       put.add(A, A, A);
131       put.add(B, B, B);
132       put.add(C, C, C);
133       table.put(put);
134 
135       verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
136           "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete",
137           "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
138           "hadPostBatchMutateIndispensably" }, tableName, new Boolean[] { false, false, true,
139           true, true, true, false, true, true, true });
140 
141       verifyMethodResult(SimpleRegionObserver.class,
142           new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
143           tableName,
144           new Integer[] {1, 1, 0, 0});
145 
146       Get get = new Get(ROW);
147       get.addColumn(A, A);
148       get.addColumn(B, B);
149       get.addColumn(C, C);
150       table.get(get);
151 
152       verifyMethodResult(SimpleRegionObserver.class,
153           new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
154       "hadDelete", "hadPrePreparedDeleteTS"},
155       tableName,
156       new Boolean[] {true, true, true, true, false, false}
157           );
158 
159       Delete delete = new Delete(ROW);
160       delete.deleteColumn(A, A);
161       delete.deleteColumn(B, B);
162       delete.deleteColumn(C, C);
163       table.delete(delete);
164 
165       verifyMethodResult(SimpleRegionObserver.class,
166           new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
167         "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS"},
168         tableName,
169         new Boolean[] {true, true, true, true, true, true, true, true}
170           );
171     } finally {
172       util.deleteTable(tableName);
173       table.close();
174     }
175     verifyMethodResult(SimpleRegionObserver.class,
176         new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
177         tableName,
178         new Integer[] {1, 1, 1, 1});
179   }
180 
181   @Test (timeout=300000)
182   public void testRowMutation() throws IOException {
183     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
184     Table table = util.createTable(tableName, new byte[][] {A, B, C});
185     try {
186       verifyMethodResult(SimpleRegionObserver.class,
187         new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
188             "hadDeleted"},
189         tableName,
190         new Boolean[] {false, false, false, false, false});
191       Put put = new Put(ROW);
192       put.add(A, A, A);
193       put.add(B, B, B);
194       put.add(C, C, C);
195 
196       Delete delete = new Delete(ROW);
197       delete.deleteColumn(A, A);
198       delete.deleteColumn(B, B);
199       delete.deleteColumn(C, C);
200 
201       RowMutations arm = new RowMutations(ROW);
202       arm.add(put);
203       arm.add(delete);
204       table.mutateRow(arm);
205 
206       verifyMethodResult(SimpleRegionObserver.class,
207           new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
208       "hadDeleted"},
209       tableName,
210       new Boolean[] {false, false, true, true, true}
211           );
212     } finally {
213       util.deleteTable(tableName);
214       table.close();
215     }
216   }
217 
218   @Test (timeout=300000)
219   public void testIncrementHook() throws IOException {
220     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
221     Table table = util.createTable(tableName, new byte[][] {A, B, C});
222     try {
223       Increment inc = new Increment(Bytes.toBytes(0));
224       inc.addColumn(A, A, 1);
225 
226       verifyMethodResult(SimpleRegionObserver.class,
227           new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
228           tableName,
229           new Boolean[] {false, false, false}
230           );
231 
232       table.increment(inc);
233 
234       verifyMethodResult(SimpleRegionObserver.class,
235           new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
236           tableName,
237           new Boolean[] {true, true, true}
238           );
239     } finally {
240       util.deleteTable(tableName);
241       table.close();
242     }
243   }
244 
245   @Test (timeout=300000)
246   public void testCheckAndPutHooks() throws IOException {
247     TableName tableName =
248         TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks");
249     try (Table table = util.createTable(tableName, new byte[][] {A, B, C})) {
250       Put p = new Put(Bytes.toBytes(0));
251       p.add(A, A, A);
252       table.put(p);
253       p = new Put(Bytes.toBytes(0));
254       p.add(A, A, A);
255       verifyMethodResult(SimpleRegionObserver.class,
256           new String[] {"hadPreCheckAndPut",
257               "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
258           tableName,
259           new Boolean[] {false, false, false}
260           );
261       table.checkAndPut(Bytes.toBytes(0), A, A, A, p);
262       verifyMethodResult(SimpleRegionObserver.class,
263           new String[] {"hadPreCheckAndPut",
264               "hadPreCheckAndPutAfterRowLock", "hadPostCheckAndPut"},
265           tableName,
266           new Boolean[] {true, true, true}
267           );
268     } finally {
269       util.deleteTable(tableName);
270     }
271   }
272 
273   @Test (timeout=300000)
274   public void testCheckAndDeleteHooks() throws IOException {
275     TableName tableName =
276         TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
277     Table table = util.createTable(tableName, new byte[][] {A, B, C});
278     try {
279       Put p = new Put(Bytes.toBytes(0));
280       p.add(A, A, A);
281       table.put(p);
282       Delete d = new Delete(Bytes.toBytes(0));
283       table.delete(d);
284       verifyMethodResult(SimpleRegionObserver.class,
285           new String[] {"hadPreCheckAndDelete",
286               "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
287           tableName,
288           new Boolean[] {false, false, false}
289           );
290       table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
291       verifyMethodResult(SimpleRegionObserver.class,
292           new String[] {"hadPreCheckAndDelete",
293               "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"},
294           tableName,
295           new Boolean[] {true, true, true}
296           );
297     } finally {
298       util.deleteTable(tableName);
299       table.close();
300     }
301   }
302 
303   @Test (timeout=300000)
304   public void testAppendHook() throws IOException {
305     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
306     Table table = util.createTable(tableName, new byte[][] {A, B, C});
307     try {
308       Append app = new Append(Bytes.toBytes(0));
309       app.add(A, A, A);
310 
311       verifyMethodResult(SimpleRegionObserver.class,
312           new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
313           tableName,
314           new Boolean[] {false, false, false}
315           );
316 
317       table.append(app);
318 
319       verifyMethodResult(SimpleRegionObserver.class,
320           new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
321           tableName,
322           new Boolean[] {true, true, true}
323           );
324     } finally {
325       util.deleteTable(tableName);
326       table.close();
327     }
328   }
329 
330   @Test (timeout=300000)
331   // HBASE-3583
332   public void testHBase3583() throws IOException {
333     TableName tableName =
334         TableName.valueOf("testHBase3583");
335     util.createTable(tableName, new byte[][] {A, B, C});
336     util.waitUntilAllRegionsAssigned(tableName);
337 
338     verifyMethodResult(SimpleRegionObserver.class,
339         new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
340             "wasScannerCloseCalled"},
341         tableName,
342         new Boolean[] {false, false, false, false}
343     );
344 
345     Table table = new HTable(util.getConfiguration(), tableName);
346     Put put = new Put(ROW);
347     put.add(A, A, A);
348     table.put(put);
349 
350     Get get = new Get(ROW);
351     get.addColumn(A, A);
352     table.get(get);
353 
354     // verify that scannerNext and scannerClose upcalls won't be invoked
355     // when we perform get().
356     verifyMethodResult(SimpleRegionObserver.class,
357         new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled",
358             "wasScannerCloseCalled"},
359         tableName,
360         new Boolean[] {true, true, false, false}
361     );
362 
363     Scan s = new Scan();
364     ResultScanner scanner = table.getScanner(s);
365     try {
366       for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
367       }
368     } finally {
369       scanner.close();
370     }
371 
372     // now scanner hooks should be invoked.
373     verifyMethodResult(SimpleRegionObserver.class,
374         new String[] {"wasScannerNextCalled", "wasScannerCloseCalled"},
375         tableName,
376         new Boolean[] {true, true}
377     );
378     util.deleteTable(tableName);
379     table.close();
380   }
381 
382   @Test (timeout=300000)
383   // HBASE-3758
384   public void testHBase3758() throws IOException {
385     TableName tableName =
386         TableName.valueOf("testHBase3758");
387     util.createTable(tableName, new byte[][] {A, B, C});
388 
389     verifyMethodResult(SimpleRegionObserver.class,
390         new String[] {"hadDeleted", "wasScannerOpenCalled"},
391         tableName,
392         new Boolean[] {false, false}
393     );
394 
395     Table table = new HTable(util.getConfiguration(), tableName);
396     Put put = new Put(ROW);
397     put.add(A, A, A);
398     table.put(put);
399 
400     Delete delete = new Delete(ROW);
401     table.delete(delete);
402 
403     verifyMethodResult(SimpleRegionObserver.class,
404         new String[] {"hadDeleted", "wasScannerOpenCalled"},
405         tableName,
406         new Boolean[] {true, false}
407     );
408 
409     Scan s = new Scan();
410     ResultScanner scanner = table.getScanner(s);
411     try {
412       for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
413       }
414     } finally {
415       scanner.close();
416     }
417 
418     // now scanner hooks should be invoked.
419     verifyMethodResult(SimpleRegionObserver.class,
420         new String[] {"wasScannerOpenCalled"},
421         tableName,
422         new Boolean[] {true}
423     );
424     util.deleteTable(tableName);
425     table.close();
426   }
427 
428   /* Overrides compaction to only output rows with keys that are even numbers */
429   public static class EvenOnlyCompactor extends BaseRegionObserver {
430     long lastCompaction;
431     long lastFlush;
432 
433     @Override
434     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
435         Store store, final InternalScanner scanner, final ScanType scanType) {
436       return new InternalScanner() {
437         @Override
438         public boolean next(List<Cell> results) throws IOException {
439           return next(results, NoLimitScannerContext.getInstance());
440         }
441 
442         @Override
443         public boolean next(List<Cell> results, ScannerContext scannerContext)
444             throws IOException {
445           List<Cell> internalResults = new ArrayList<Cell>();
446           boolean hasMore;
447           do {
448             hasMore = scanner.next(internalResults, scannerContext);
449             if (!internalResults.isEmpty()) {
450               long row = Bytes.toLong(CellUtil.cloneValue(internalResults.get(0)));
451               if (row % 2 == 0) {
452                 // return this row
453                 break;
454               }
455               // clear and continue
456               internalResults.clear();
457             }
458           } while (hasMore);
459 
460           if (!internalResults.isEmpty()) {
461             results.addAll(internalResults);
462           }
463           return hasMore;
464         }
465 
466         @Override
467         public void close() throws IOException {
468           scanner.close();
469         }
470       };
471     }
472 
473     @Override
474     public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
475         Store store, StoreFile resultFile) {
476       lastCompaction = EnvironmentEdgeManager.currentTime();
477     }
478 
479     @Override
480     public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
481       lastFlush = EnvironmentEdgeManager.currentTime();
482     }
483   }
484   /**
485    * Tests overriding compaction handling via coprocessor hooks
486    * @throws Exception
487    */
488   @Test (timeout=300000)
489   public void testCompactionOverride() throws Exception {
490     TableName compactTable = TableName.valueOf("TestCompactionOverride");
491     Admin admin = util.getHBaseAdmin();
492     if (admin.tableExists(compactTable)) {
493       admin.disableTable(compactTable);
494       admin.deleteTable(compactTable);
495     }
496 
497     HTableDescriptor htd = new HTableDescriptor(compactTable);
498     htd.addFamily(new HColumnDescriptor(A));
499     htd.addCoprocessor(EvenOnlyCompactor.class.getName());
500     admin.createTable(htd);
501 
502     Table table = new HTable(util.getConfiguration(), compactTable);
503     for (long i=1; i<=10; i++) {
504       byte[] iBytes = Bytes.toBytes(i);
505       Put put = new Put(iBytes);
506       put.setDurability(Durability.SKIP_WAL);
507       put.add(A, A, iBytes);
508       table.put(put);
509     }
510 
511     HRegion firstRegion = cluster.getRegions(compactTable).get(0);
512     Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(
513         EvenOnlyCompactor.class.getName());
514     assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
515     EvenOnlyCompactor compactor = (EvenOnlyCompactor)cp;
516 
517     // flush first, then force a major compaction
518     long ts = System.currentTimeMillis();
519     admin.flush(compactTable);
520     // wait for flush
521     for (int i=0; i<10; i++) {
522       if (compactor.lastFlush >= ts) {
523         break;
524       }
525       Thread.sleep(1000);
526     }
527     assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
528     LOG.debug("Flush complete");
529 
530     ts = compactor.lastFlush;
531     admin.majorCompact(compactTable);
532     // wait for compaction
533     for (int i=0; i<30; i++) {
534       if (compactor.lastCompaction >= ts) {
535         break;
536       }
537       Thread.sleep(1000);
538     }
539     LOG.debug("Last compaction was at "+compactor.lastCompaction);
540     assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
541 
542     // only even rows should remain
543     ResultScanner scanner = table.getScanner(new Scan());
544     try {
545       for (long i=2; i<=10; i+=2) {
546         Result r = scanner.next();
547         assertNotNull(r);
548         assertFalse(r.isEmpty());
549         byte[] iBytes = Bytes.toBytes(i);
550         assertArrayEquals("Row should be "+i, r.getRow(), iBytes);
551         assertArrayEquals("Value should be "+i, r.getValue(A, A), iBytes);
552       }
553     } finally {
554       scanner.close();
555     }
556     table.close();
557   }
558 
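      // Writes one HFile and bulk loads it via LoadIncrementalHFiles, then checks that the
      // preBulkLoadHFile/postBulkLoadHFile upcalls were seen by the observer.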
559   @Test (timeout=300000)
560   public void bulkLoadHFileTest() throws Exception {
561     String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest";
562     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
563     Configuration conf = util.getConfiguration();
564     HTable table = util.createTable(tableName, new byte[][] {A, B, C});
565     try {
566       verifyMethodResult(SimpleRegionObserver.class,
567           new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
568           tableName,
569           new Boolean[] {false, false}
570           );
571 
572       FileSystem fs = util.getTestFileSystem();
573       final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
574       Path familyDir = new Path(dir, Bytes.toString(A));
575 
576       createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A);
577 
578       // Bulk load
579       new LoadIncrementalHFiles(conf).doBulkLoad(dir, table);
580 
581       verifyMethodResult(SimpleRegionObserver.class,
582           new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
583           tableName,
584           new Boolean[] {true, true}
585           );
586     } finally {
587       util.deleteTable(tableName);
588       table.close();
589     }
590   }
591 
592   @Ignore // TODO: HBASE-13391 to fix flaky test
593   @Test (timeout=300000)
594   public void testRecovery() throws Exception {
595     LOG.info(TestRegionObserverInterface.class.getName() +".testRecovery");
596     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
597     HTable table = util.createTable(tableName, new byte[][] {A, B, C});
598     try {
599       JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
600       ServerName sn2 = rs1.getRegionServer().getServerName();
601       String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();
602 
603       util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
604       while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
605         Thread.sleep(100);
606       }
607 
608       Put put = new Put(ROW);
609       put.add(A, A, A);
610       put.add(B, B, B);
611       put.add(C, C, C);
612       table.put(put);
613 
614       verifyMethodResult(SimpleRegionObserver.class,
615           new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
616         "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
617         tableName,
618         new Boolean[] {false, false, true, true, true, true, false}
619           );
620 
621       verifyMethodResult(SimpleRegionObserver.class,
622           new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
623               "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
624           tableName,
625           new Integer[] {0, 0, 1, 1, 0, 0});
626 
627       cluster.killRegionServer(rs1.getRegionServer().getServerName());
628       Threads.sleep(1000); // Let the kill soak in.
629       util.waitUntilAllRegionsAssigned(tableName);
630       LOG.info("All regions assigned");
631 
632       verifyMethodResult(SimpleRegionObserver.class,
633           new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
634               "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
635           tableName,
636           new Integer[]{1, 1, 0, 0, 0, 0});
637     } finally {
638       util.deleteTable(tableName);
639       table.close();
640     }
641   }
642 
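      // Moves the test region to a newly started region server, kills that server, and checks
      // that the preWALRestore/postWALRestore upcalls run once the region is reassigned.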
643   @Ignore // TODO: HBASE-13391 to fix flaky test
644   @Test (timeout=300000)
645   public void testLegacyRecovery() throws Exception {
646     LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery");
647     TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testLegacyRecovery");
648     HTable table = util.createTable(tableName, new byte[][] {A, B, C});
649     try {
650       JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
651       ServerName sn2 = rs1.getRegionServer().getServerName();
652       String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();
653 
654       util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
655       while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
656         Thread.sleep(100);
657       }
658 
659       Put put = new Put(ROW);
660       put.add(A, A, A);
661       put.add(B, B, B);
662       put.add(C, C, C);
663       table.put(put);
664 
665       verifyMethodResult(SimpleRegionObserver.Legacy.class,
666           new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
667         "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete"},
668         tableName,
669         new Boolean[] {false, false, true, true, true, true, false}
670           );
671 
672       verifyMethodResult(SimpleRegionObserver.Legacy.class,
673           new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
674               "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
675           tableName,
676           new Integer[] {0, 0, 1, 1, 0, 0});
677 
678       cluster.killRegionServer(rs1.getRegionServer().getServerName());
679       Threads.sleep(1000); // Let the kill soak in.
680       util.waitUntilAllRegionsAssigned(tableName);
681       LOG.info("All regions assigned");
682 
683       verifyMethodResult(SimpleRegionObserver.Legacy.class,
684           new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut",
685               "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
686           tableName,
687           new Integer[]{1, 1, 0, 0, 1, 1});
688     } finally {
689       util.deleteTable(tableName);
690       table.close();
691     }
692   }
693 
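      // Same recovery scenario as above, but against SimpleRegionObserver.TABLE_SKIPPED; the
      // WAL-restore counters are expected to stay at zero for that table.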
694   @Test (timeout=300000)
695   public void testPreWALRestoreSkip() throws Exception {
696     LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
697     TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
698     HTable table = util.createTable(tableName, new byte[][] { A, B, C });
699 
700     JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
701     ServerName sn2 = rs1.getRegionServer().getServerName();
702     String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();
703 
704     util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
705     while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
706       Thread.sleep(100);
707     }
708 
709     Put put = new Put(ROW);
710     put.add(A, A, A);
711     put.add(B, B, B);
712     put.add(C, C, C);
713     table.put(put);
714     table.flushCommits();
715 
716     cluster.killRegionServer(rs1.getRegionServer().getServerName());
717     Threads.sleep(20000); // just to be sure that the kill has fully started.
718     util.waitUntilAllRegionsAssigned(tableName);
719 
720     verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
721         "getCtPostWALRestore", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
722         tableName,
723         new Integer[] {0, 0, 0, 0});
724 
725     util.deleteTable(tableName);
726     table.close();
727   }
728 
729   // Check each region of the given table to verify whether the coprocessor upcalls were invoked.
730   private void verifyMethodResult(Class<?> c, String methodName[], TableName tableName,
731                                   Object value[]) throws IOException {
732     try {
733       for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
734         if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()){
735           continue;
736         }
737         for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer().getRSRpcServices())) {
738           if (!r.getTable().equals(tableName)) {
739             continue;
740           }
741           RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
742               getCoprocessorHost();
743 
744           Coprocessor cp = cph.findCoprocessor(c.getName());
745           assertNotNull(cp);
746           for (int i = 0; i < methodName.length; ++i) {
747             Method m = c.getMethod(methodName[i]);
748             Object o = m.invoke(cp);
749             assertTrue("Result of " + c.getName() + "." + methodName[i]
750                 + " is expected to be " + value[i].toString()
751                 + ", while we get " + o.toString(), o.equals(value[i]));
752           }
753         }
754       }
755     } catch (Exception e) {
756       throw new IOException(e.toString());
757     }
758   }
759 
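      // Helper for bulkLoadHFileTest: writes a small HFile with nine cells ("1".."9") under the
      // given family and qualifier.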
760   private static void createHFile(
761       Configuration conf,
762       FileSystem fs, Path path,
763       byte[] family, byte[] qualifier) throws IOException {
764     HFileContext context = new HFileContextBuilder().build();
765     HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
766         .withPath(fs, path)
767         .withFileContext(context)
768         .create();
769     long now = System.currentTimeMillis();
770     try {
771       for (int i = 1; i <= 9; i++) {
772         KeyValue kv = new KeyValue(Bytes.toBytes(i+""), family, qualifier, now, Bytes.toBytes(i+""));
773         writer.append(kv);
774       }
775     } finally {
776       writer.close();
777     }
778   }
779 }