1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertTrue;
25  import static org.mockito.Mockito.doReturn;
26  import static org.mockito.Mockito.spy;
27  
28  import java.io.IOException;
29  import java.util.List;
30  import java.util.Map;
31  import java.util.SortedMap;
32  import java.util.TreeMap;
33  
34  import org.apache.commons.logging.Log;
35  import org.apache.commons.logging.LogFactory;
36  import org.apache.hadoop.conf.Configuration;
37  import org.apache.hadoop.fs.FSDataOutputStream;
38  import org.apache.hadoop.fs.FileStatus;
39  import org.apache.hadoop.fs.FileSystem;
40  import org.apache.hadoop.fs.Path;
41  import org.apache.hadoop.hbase.ChoreService;
42  import org.apache.hadoop.hbase.CoordinatedStateManager;
43  import org.apache.hadoop.hbase.HBaseTestingUtility;
44  import org.apache.hadoop.hbase.HColumnDescriptor;
45  import org.apache.hadoop.hbase.HConstants;
46  import org.apache.hadoop.hbase.HRegionInfo;
47  import org.apache.hadoop.hbase.HTableDescriptor;
48  import org.apache.hadoop.hbase.MetaMockingUtil;
49  import org.apache.hadoop.hbase.NamespaceDescriptor;
50  import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
51  import org.apache.hadoop.hbase.ProcedureInfo;
52  import org.apache.hadoop.hbase.Server;
53  import org.apache.hadoop.hbase.ServerName;
54  import org.apache.hadoop.hbase.testclassification.SmallTests;
55  import org.apache.hadoop.hbase.TableDescriptors;
56  import org.apache.hadoop.hbase.TableName;
57  import org.apache.hadoop.hbase.TableStateManager;
58  import org.apache.hadoop.hbase.backup.BackupType;
59  import org.apache.hadoop.hbase.client.ClusterConnection;
60  import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
61  import org.apache.hadoop.hbase.client.Result;
62  import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
63  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
64  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
65  import org.apache.hadoop.hbase.executor.ExecutorService;
66  import org.apache.hadoop.hbase.io.Reference;
67  import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
68  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
69  import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
70  import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
71  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
72  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
73  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
74  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
75  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
76  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
77  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
78  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
79  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
80  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
81  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
82  import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
83  import org.apache.hadoop.hbase.regionserver.HStore;
84  import org.apache.hadoop.hbase.util.Bytes;
85  import org.apache.hadoop.hbase.util.FSUtils;
86  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
87  import org.apache.hadoop.hbase.util.Pair;
88  import org.apache.hadoop.hbase.util.Triple;
89  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
90  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
91  import org.junit.Test;
92  import org.junit.experimental.categories.Category;
93  import org.mockito.Mockito;
94  import org.mockito.invocation.InvocationOnMock;
95  import org.mockito.stubbing.Answer;
96  
97  import com.google.protobuf.RpcController;
98  import com.google.protobuf.Service;
99  import com.google.protobuf.ServiceException;
100 
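/**
 * Tests for CatalogJanitor: checks that split parents are removed from the
 * catalog only once no daughter still references them, and that a cleaned-up
 * parent's store files are archived.
 */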
101 @Category(SmallTests.class)
102 public class TestCatalogJanitor {
103   private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
104 
105   /**
106    * Pseudo server for the tests below.
107    * Be sure to call stop() on the way out, else it can leave some mess around.
108    */
109   class MockServer implements Server {
110     private final ClusterConnection connection;
111     private final Configuration c;
112 
113     MockServer(final HBaseTestingUtility htu)
114     throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
115       this.c = htu.getConfiguration();
116       ClientProtos.ClientService.BlockingInterface ri =
117         Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
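      // Stub mutate() so any mutation sent through the mocked connection reports success.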
118       MutateResponse.Builder builder = MutateResponse.newBuilder();
119       builder.setProcessed(true);
120       try {
121         Mockito.when(ri.mutate(
122           (RpcController)Mockito.any(), (MutateRequest)Mockito.any())).
123             thenReturn(builder.build());
124       } catch (ServiceException se) {
125         throw ProtobufUtil.getRemoteException(se);
126       }
127       try {
128         Mockito.when(ri.multi(
129           (RpcController)Mockito.any(), (MultiRequest)Mockito.any())).
130             thenAnswer(new Answer<MultiResponse>() {
131               @Override
132               public MultiResponse answer(InvocationOnMock invocation) throws Throwable {
133                 return buildMultiResponse( (MultiRequest)invocation.getArguments()[1]);
134               }
135             });
136       } catch (ServiceException se) {
137         throw ProtobufUtil.getRemoteException(se);
138       }
139       // Mock an HConnection and an AdminProtocol implementation.  Have the
140       // HConnection return the HRI.  Have the HRI return a few mocked-up responses
141       // to make our test work.
142       this.connection =
143         HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c,
144           Mockito.mock(AdminProtos.AdminService.BlockingInterface.class), ri,
145             ServerName.valueOf("example.org,12345,6789"),
146           HRegionInfo.FIRST_META_REGIONINFO);
147       // Set hbase.rootdir into test dir.
148       FileSystem fs = FileSystem.get(this.c);
149       Path rootdir = FSUtils.getRootDir(this.c);
150       FSUtils.setRootDir(this.c, rootdir);
151       AdminProtos.AdminService.BlockingInterface hri =
152         Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
153     }
154 
155     @Override
156     public ClusterConnection getConnection() {
157       return this.connection;
158     }
159 
160     @Override
161     public MetaTableLocator getMetaTableLocator() {
162       return null;
163     }
164 
165     @Override
166     public Configuration getConfiguration() {
167       return this.c;
168     }
169 
170     @Override
171     public ServerName getServerName() {
172       return ServerName.valueOf("mockserver.example.org", 1234, -1L);
173     }
174 
175     @Override
176     public ZooKeeperWatcher getZooKeeper() {
177       return null;
178     }
179 
180     @Override
181     public CoordinatedStateManager getCoordinatedStateManager() {
182       BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class);
183       SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class);
184       Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c);
185       SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class);
186       Mockito.when(c.getDetails()).thenReturn(d);
187       return m;
188     }
189 
190     @Override
191     public void abort(String why, Throwable e) {
192       //no-op
193     }
194 
195     @Override
196     public boolean isAborted() {
197       return false;
198     }
199 
200     @Override
201     public boolean isStopped() {
202       return false;
203     }
204 
205     @Override
206     public void stop(String why) {
207     }
208 
209     @Override
210     public ChoreService getChoreService() {
211       return null;
212     }
213   }
214 
215   /**
216    * Mock MasterServices for tests below.
217    */
218   class MockMasterServices implements MasterServices {
219     private final MasterFileSystem mfs;
220     private final AssignmentManager asm;
221     private final ServerManager sm;
222 
223     MockMasterServices(final Server server) throws IOException {
224       this.mfs = new MasterFileSystem(server, this);
225       this.asm = Mockito.mock(AssignmentManager.class);
226       this.sm = Mockito.mock(ServerManager.class);
227     }
228 
229     @Override
230     public void checkTableModifiable(TableName tableName) throws IOException {
231       //no-op
232     }
233 
234     @Override
235     public boolean isMasterProcedureExecutorEnabled() {
236       return true;
237     }
238 
239     @Override
240     public long createTable(
241         final HTableDescriptor desc,
242         final byte[][] splitKeys,
243         final long nonceGroup,
244         final long nonce) throws IOException {
245       // no-op
246       return -1;
247     }
248 
249     @Override
250     public SnapshotManager getSnapshotManager() {
251       return null;
252     }
253 
254     @Override
255     public MasterProcedureManagerHost getMasterProcedureManagerHost() {
256       return null;
257     }
258 
259     @Override
260     public AssignmentManager getAssignmentManager() {
261       return this.asm;
262     }
263 
264     @Override
265     public ExecutorService getExecutorService() {
266       return null;
267     }
268 
269     @Override
270     public ChoreService getChoreService() {
271       return null;
272     }
273 
274     @Override
275     public MasterFileSystem getMasterFileSystem() {
276       return this.mfs;
277     }
278 
279     @Override
280     public MasterCoprocessorHost getMasterCoprocessorHost() {
281       return null;
282     }
283 
284     @Override
285     public MasterQuotaManager getMasterQuotaManager() {
286       return null;
287     }
288 
289     @Override
290     public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
291       return null;
292     }
293 
294     @Override
295     public ServerManager getServerManager() {
296       return sm;
297     }
298 
299     @Override
300     public ZooKeeperWatcher getZooKeeper() {
301       return null;
302     }
303 
304     @Override
305     public CoordinatedStateManager getCoordinatedStateManager() {
306       return null;
307     }
308 
309     @Override
310     public MetaTableLocator getMetaTableLocator() {
311       return null;
312     }
313 
314     @Override
315     public ClusterConnection getConnection() {
316       return null;
317     }
318 
319     @Override
320     public Configuration getConfiguration() {
321       return mfs.conf;
322     }
323 
324     @Override
325     public ServerName getServerName() {
326       return null;
327     }
328 
329     @Override
330     public void abort(String why, Throwable e) {
331       //no-op
332     }
333 
334     @Override
335     public boolean isAborted() {
336       return false;
337     }
338 
339     private boolean stopped = false;
340 
341     @Override
342     public void stop(String why) {
343       stopped = true;
344     }
345 
346     @Override
347     public boolean isStopped() {
348       return stopped;
349     }
350 
351     @Override
352     public TableDescriptors getTableDescriptors() {
353       return new TableDescriptors() {
354         @Override
355         public HTableDescriptor remove(TableName tablename) throws IOException {
356           // TODO Auto-generated method stub
357           return null;
358         }
359 
360         @Override
361         public Map<String, HTableDescriptor> getAll() throws IOException {
362           // TODO Auto-generated method stub
363           return null;
364         }
365 
366         @Override
367         public HTableDescriptor get(TableName tablename)
368         throws IOException {
369           return createHTableDescriptor();
370         }
371 
372         @Override
373         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
374           return null;
375         }
376 
377         @Override
378         public void add(HTableDescriptor htd) throws IOException {
379           // TODO Auto-generated method stub
380 
381         }
382         @Override
383         public void setCacheOn() throws IOException {
384         }
385 
386         @Override
387         public void setCacheOff() throws IOException {
388         }
389       };
390     }
391 
392     @Override
393     public boolean isServerShutdownHandlerEnabled() {
394       return true;
395     }
396 
397     @Override
398     public boolean registerService(Service instance) {
399       return false;
400     }
401 
402     @Override
403     public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
404       //To change body of implemented methods use File | Settings | File Templates.
405     }
406 
407     @Override
408     public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
409       //To change body of implemented methods use File | Settings | File Templates.
410     }
411 
412     @Override
413     public void deleteNamespace(String name) throws IOException {
414       //To change body of implemented methods use File | Settings | File Templates.
415     }
416 
417     @Override
418     public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
419       return null;  //To change body of implemented methods use File | Settings | File Templates.
420     }
421 
422     @Override
423     public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
424       return null;  //To change body of implemented methods use File | Settings | File Templates.
425     }
426 
427     @Override
428     public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
429         throws IOException {
430       return false;  //To change body of implemented methods use File | Settings | File Templates.
431     }
432 
433     @Override
434     public List<ProcedureInfo> listProcedures() throws IOException {
435       return null;  //To change body of implemented methods use File | Settings | File Templates.
436     }
437 
438     @Override
439     public Pair<Long, String> backupTables(
440         final BackupType type,
441         final List<TableName> tableList,
442         final String targetRootDir, final int workers,
443         final long bandwidth) throws IOException {
444       return null;
445     }
446 
447 
448     @Override
449     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
450       return null;  //To change body of implemented methods use File | Settings | File Templates.
451     }
452 
453     @Override
454     public List<TableName> listTableNamesByNamespace(String name) throws IOException {
455       return null;
456     }
457 
458     @Override
459     public long deleteTable(
460         final TableName tableName,
461         final long nonceGroup,
462         final long nonce) throws IOException {
463       return -1;
464     }
465     @Override
466     public LoadBalancer getLoadBalancer() {
467       return null;
468     }
469 
470     public void truncateTable(
471         final TableName tableName,
472         final boolean preserveSplits,
473         final long nonceGroup,
474         final long nonce) throws IOException {
475     }
476 
477     @Override
478     public void modifyTable(
479         final TableName tableName,
480         final HTableDescriptor descriptor,
481         final long nonceGroup,
482         final long nonce) throws IOException {
483     }
484 
485     @Override
486     public long enableTable(
487         final TableName tableName,
488         final long nonceGroup,
489         final long nonce) throws IOException {
490       return -1;
491     }
492 
493     @Override
494     public long disableTable(
495         TableName tableName,
496         final long nonceGroup,
497         final long nonce) throws IOException {
498       return -1;
499     }
500 
501     @Override
502     public void addColumn(
503         final TableName tableName,
504         final HColumnDescriptor columnDescriptor,
505         final long nonceGroup,
506         final long nonce) throws IOException { }
507 
508     @Override
509     public void modifyColumn(
510         final TableName tableName,
511         final HColumnDescriptor descriptor,
512         final long nonceGroup,
513         final long nonce) throws IOException { }
514 
515     @Override
516     public void deleteColumn(
517         final TableName tableName,
518         final byte[] columnName,
519         final long nonceGroup,
520         final long nonce) throws IOException { }
521 
522     @Override
523     public TableLockManager getTableLockManager() {
524       return null;
525     }
526 
527     @Override
528     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
529         boolean forcible) throws IOException {
530     }
531 
532     @Override
533     public boolean isInitialized() {
534       // Auto-generated method stub
535       return false;
536     }
537 
538     @Override
539     public boolean isNamespaceManagerInitialized() {
540       return false;
541     }
542 
543     @Override
544     public boolean isInMaintenanceMode() {
545       return false;
546     }
547 
548     @Override
549     public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
550       // Auto-generated method stub
551       return 0;
552     }
553 
554     @Override
555     public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
556       // Auto-generated method stub
557       return 0;
558     }
559 
560     @Override
561     public TableStateManager getTableStateManager() {
562       // TODO Auto-generated method stub
563       return null;
564     }
565 
566     @Override
567     public String getRegionServerVersion(ServerName sn) {
568       return null;
569     }
570 
571     @Override
572     public void checkIfShouldMoveSystemRegionAsync() {
573     }
574   }
575 
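  /**
   * Verify that a split parent is removed from the catalog only once no
   * daughter still holds a reference file back to it.
   */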
576   @Test
577   public void testCleanParent() throws IOException, InterruptedException {
578     HBaseTestingUtility htu = new HBaseTestingUtility();
579     setRootDirAndCleanIt(htu, "testCleanParent");
580     Server server = new MockServer(htu);
581     try {
582       MasterServices services = new MockMasterServices(server);
583       CatalogJanitor janitor = new CatalogJanitor(server, services);
584       // Create regions.
585       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
586       htd.addFamily(new HColumnDescriptor("f"));
587       HRegionInfo parent =
588         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
589             Bytes.toBytes("eee"));
590       HRegionInfo splita =
591         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
592             Bytes.toBytes("ccc"));
593       HRegionInfo splitb =
594         new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
595             Bytes.toBytes("eee"));
596       // Test that when both daughter regions are in place, we do not
597       // remove the parent.
598       Result r = createResult(parent, splita, splitb);
599       // Add a reference under splitA directory so we don't clear out the parent.
600       Path rootdir = services.getMasterFileSystem().getRootDir();
601       Path tabledir =
602         FSUtils.getTableDir(rootdir, htd.getTableName());
603       Path storedir = HStore.getStoreHomedir(tabledir, splita,
604           htd.getColumnFamilies()[0].getName());
605       Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
606       long now = System.currentTimeMillis();
607       // Reference name has this format: StoreFile#REF_NAME_PARSER
608       Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
609       FileSystem fs = services.getMasterFileSystem().getFileSystem();
610       Path path = ref.write(fs, p);
611       assertTrue(fs.exists(path));
612       assertFalse(janitor.cleanParent(parent, r));
613       // Remove the reference file and try again.
614       assertTrue(fs.delete(p, true));
615       assertTrue(janitor.cleanParent(parent, r));
616     } finally {
617       server.stop("shutdown");
618     }
619   }
620 
621   /**
622    * Make sure parent gets cleaned up even if daughter is cleaned up before it.
623    * @throws IOException
624    * @throws InterruptedException
625    */
626   @Test
627   public void testParentCleanedEvenIfDaughterGoneFirst()
628   throws IOException, InterruptedException {
629     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
630       "testParentCleanedEvenIfDaughterGoneFirst", Bytes.toBytes("eee"));
631   }
632 
633   /**
634    * Make sure last parent with empty end key gets cleaned up even if daughter is cleaned up before it.
635    * @throws IOException
636    * @throws InterruptedException
637    */
638   @Test
639   public void testLastParentCleanedEvenIfDaughterGoneFirst()
640   throws IOException, InterruptedException {
641     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
642       "testLastParentCleanedEvenIfDaughterGoneFirst", new byte[0]);
643   }
644 
645   /**
646    * Make sure parent with specified end key gets cleaned up even if daughter is cleaned up before it.
647    *
648    * @param rootDir the test case name, used as the HBase testing utility root
649    * @param lastEndKey the end key of the split parent
650    * @throws IOException
651    * @throws InterruptedException
652    */
653   private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
654   final String rootDir, final byte[] lastEndKey)
655   throws IOException, InterruptedException {
656     HBaseTestingUtility htu = new HBaseTestingUtility();
657     setRootDirAndCleanIt(htu, rootDir);
658     Server server = new MockServer(htu);
659     MasterServices services = new MockMasterServices(server);
660     CatalogJanitor janitor = new CatalogJanitor(server, services);
661     final HTableDescriptor htd = createHTableDescriptor();
662 
663     // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
664 
665     // Parent
666     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
667       lastEndKey);
668     // Sleep a second, else the encoded names of these regions come out
669     // the same for all regions with the same start key created in the same second.
670     Thread.sleep(1001);
671 
672     // Daughter a
673     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
674       Bytes.toBytes("ccc"));
675     Thread.sleep(1001);
676     // Make daughters of daughter a; splitaa and splitab.
677     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
678       Bytes.toBytes("bbb"));
679     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
680       Bytes.toBytes("ccc"));
681 
682     // Daughter b
683     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
684       lastEndKey);
685     Thread.sleep(1001);
686     // Make Daughters of daughterb; splitba and splitbb.
687     HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
688       Bytes.toBytes("ddd"));
689     HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"),
690     lastEndKey);
691 
692     // First test that the Comparator up in CatalogJanitor sorts as expected.
693     // Just for kicks.
694     SortedMap<HRegionInfo, Result> regions =
695       new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
696     // Now make sure that this regions map sorts as we expect it to.
697     regions.put(parent, createResult(parent, splita, splitb));
698     regions.put(splitb, createResult(splitb, splitba, splitbb));
699     regions.put(splita, createResult(splita, splitaa, splitab));
700     // Assert it's properly sorted.
701     int index = 0;
702     for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
703       if (index == 0) {
704         assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
705       } else if (index == 1) {
706         assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
707       } else if (index == 2) {
708         assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
709       }
710       index++;
711     }
712 
713     // Now play around with the cleanParent function.  Create a ref from splita
714     // up to the parent.
715     Path splitaRef =
716       createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
717     // Make sure actual super parent sticks around because splita has a ref.
718     assertFalse(janitor.cleanParent(parent, regions.get(parent)));
719 
720     // splitba and splitbb do not have dirs in fs.  That means that if
721     // we test splitb, it should get cleaned up.
722     assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));
723 
724     // Now remove ref from splita to parent... so parent can be let go and so
725     // the daughter splita can be split (it can't split while references remain).
726     // BUT make the timing such that the daughter gets cleaned up before we
727     // can get a chance to let go of the parent.
728     FileSystem fs = FileSystem.get(htu.getConfiguration());
729     assertTrue(fs.delete(splitaRef, true));
730     // Create the refs from daughters of splita.
731     Path splitaaRef =
732       createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
733     Path splitabRef =
734       createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);
735 
736     // Test splita.  It should stick around because of the references from splitab, etc.
737     assertFalse(janitor.cleanParent(splita, regions.get(splita)));
738 
739     // Now clean up the daughter splita before the parent.  Remove the references from its daughters.
740     assertTrue(fs.delete(splitaaRef, true));
741     assertTrue(fs.delete(splitabRef, true));
742     assertTrue(janitor.cleanParent(splita, regions.get(splita)));
743 
744     // The super parent should get cleaned up now that both splita and splitb are gone.
745     assertTrue(janitor.cleanParent(parent, regions.get(parent)));
746 
747     services.stop("test finished");
748     janitor.cancel(true);
749   }
750 
751   /**
752    * CatalogJanitor.scan() should not clean parent regions if their own
753    * parents are still referencing them. This ensures that grandfather regions
754    * do not point to deleted parent regions.
755    */
756   @Test
757   public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
758     HBaseTestingUtility htu = new HBaseTestingUtility();
759     setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
760     Server server = new MockServer(htu);
761     MasterServices services = new MockMasterServices(server);
762 
763     final HTableDescriptor htd = createHTableDescriptor();
764 
765     // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.
766 
767     // Parent
768     HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
769       new byte[0], true);
770     // Sleep a second, else the encoded names of these regions come out
771     // the same for all regions with the same start key created in the same second.
772     Thread.sleep(1001);
773 
774     // Daughter a
775     HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
776       Bytes.toBytes("ccc"), true);
777     Thread.sleep(1001);
778     // Make daughters of daughter a; splitaa and splitab.
779     HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
780       Bytes.toBytes("bbb"), false);
781     HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
782       Bytes.toBytes("ccc"), false);
783 
784     // Daughter b
785     HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
786         new byte[0]);
787     Thread.sleep(1001);
788 
789     final Map<HRegionInfo, Result> splitParents =
790         new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
791     splitParents.put(parent, createResult(parent, splita, splitb));
792     splita.setOffline(true); //simulate that splita goes offline when it is split
793     splitParents.put(splita, createResult(splita, splitaa,splitab));
794 
795     final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
796     CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
797     doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
798             10, mergedRegions, splitParents)).when(janitor)
799         .getMergedRegionsAndSplitParents();
800 
801     //create ref from splita to parent
802     Path splitaRef =
803         createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
804 
805     // parent and splita should not be removed
806     assertEquals(0, janitor.scan());
807 
808     //now delete the ref
809     FileSystem fs = FileSystem.get(htu.getConfiguration());
810     assertTrue(fs.delete(splitaRef, true));
811 
812     // now both parent and splita can be deleted
813     assertEquals(2, janitor.scan());
814 
815     services.stop("test finished");
816     janitor.cancel(true);
817   }
818 
819   /**
820    * Test that the SplitParentFirstComparator sorts split parents ahead of
821    * their daughter regions.
822    */
823   @Test
824   public void testSplitParentFirstComparator() {
825     SplitParentFirstComparator comp = new SplitParentFirstComparator();
826     final HTableDescriptor htd = createHTableDescriptor();
827 
828     /*  Region splits:
829      *
830      *  rootRegion --- firstRegion --- firstRegiona
831      *              |               |- firstRegionb
832      *              |
833      *              |- lastRegion --- lastRegiona  --- lastRegionaa
834      *                             |                |- lastRegionab
835      *                             |- lastRegionb
836      *
837      *  rootRegion   :   []  - []
838      *  firstRegion  :   []  - bbb
839      *  lastRegion   :   bbb - []
840      *  firstRegiona :   []  - aaa
841      *  firstRegionb :   aaa - bbb
842      *  lastRegiona  :   bbb - ddd
843      *  lastRegionb  :   ddd - []
844      */
845 
846     // root region
847     HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(),
848       HConstants.EMPTY_START_ROW,
849       HConstants.EMPTY_END_ROW, true);
850     HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(),
851       HConstants.EMPTY_START_ROW,
852       Bytes.toBytes("bbb"), true);
853     HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(),
854       Bytes.toBytes("bbb"),
855       HConstants.EMPTY_END_ROW, true);
856 
857     assertTrue(comp.compare(rootRegion, rootRegion) == 0);
858     assertTrue(comp.compare(firstRegion, firstRegion) == 0);
859     assertTrue(comp.compare(lastRegion, lastRegion) == 0);
860     assertTrue(comp.compare(rootRegion, firstRegion) < 0);
861     assertTrue(comp.compare(rootRegion, lastRegion) < 0);
862     assertTrue(comp.compare(firstRegion, lastRegion) < 0);
863 
864     //first region split into a, b
865     HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(),
866       HConstants.EMPTY_START_ROW,
867       Bytes.toBytes("aaa"), true);
868     HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(),
869         Bytes.toBytes("aaa"),
870       Bytes.toBytes("bbb"), true);
871     //last region split into a, b
872     HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(),
873       Bytes.toBytes("bbb"),
874       Bytes.toBytes("ddd"), true);
875     HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(),
876       Bytes.toBytes("ddd"),
877       HConstants.EMPTY_END_ROW, true);
878 
879     assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
880     assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
881     assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
882     assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
883     assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
884     assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
885     assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);
886 
887     assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
888     assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
889     assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
890     assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
891     assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
892     assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
893     assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);
894 
895     assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
896     assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
897     assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
898     assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);
899 
900     HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(),
901       Bytes.toBytes("bbb"),
902       Bytes.toBytes("ccc"), false);
903     HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(),
904       Bytes.toBytes("ccc"),
905       Bytes.toBytes("ddd"), false);
906 
907     assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
908     assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
909     assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
910 
911   }
912 
913   @Test
914   public void testArchiveOldRegion() throws Exception {
915     String table = "table";
916     HBaseTestingUtility htu = new HBaseTestingUtility();
917     setRootDirAndCleanIt(htu, "testCleanParent");
918     Server server = new MockServer(htu);
919     MasterServices services = new MockMasterServices(server);
920 
921     // create the janitor
922     CatalogJanitor janitor = new CatalogJanitor(server, services);
923 
924     // Create regions.
925     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
926     htd.addFamily(new HColumnDescriptor("f"));
927     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
928         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
929     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
930         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
931     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
932         Bytes.toBytes("ccc"),
933         Bytes.toBytes("eee"));
934 
935     // Test that when both daughter regions are in place, we do not
936     // remove the parent.
937     Result parentMetaRow = createResult(parent, splita, splitb);
938     FileSystem fs = FileSystem.get(htu.getConfiguration());
939     Path rootdir = services.getMasterFileSystem().getRootDir();
940     // have to set the root directory since we use it in HFileDisposer to figure out how to get to the
941     // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
942     // the single test passes, but when the full suite is run, things get borked).
943     FSUtils.setRootDir(fs.getConf(), rootdir);
944     Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
945     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
946     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
947       tabledir, htd.getColumnFamilies()[0].getName());
948     LOG.debug("Table dir:" + tabledir);
949     LOG.debug("Store dir:" + storedir);
950     LOG.debug("Store archive dir:" + storeArchive);
951 
952     // add a couple of store files that we can check for
953     FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
954     // get the current store files for comparison
955     FileStatus[] storeFiles = fs.listStatus(storedir);
956     int index = 0;
957     for (FileStatus file : storeFiles) {
958       LOG.debug("Have store file:" + file.getPath());
959       assertEquals("Got unexpected store file", mockFiles[index].getPath(),
960         storeFiles[index].getPath());
961       index++;
962     }
963 
964     // do the cleaning of the parent
965     assertTrue(janitor.cleanParent(parent, parentMetaRow));
966     LOG.debug("Finished cleanup of parent region");
967 
968     // and now check to make sure that the files have actually been archived
969     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
970     logFiles("original files", storeFiles);
971     logFiles("archived files", archivedStoreFiles);
972 
973     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
974 
975     // cleanup
976     FSUtils.delete(fs, rootdir, true);
977     services.stop("Test finished");
978     server.stop("Test finished");
979     janitor.cancel(true);
980   }
981 
982   /**
983    * @param description description of the files for logging
984    * @param storeFiles the status of the files to log
985    */
986   private void logFiles(String description, FileStatus[] storeFiles) {
987     LOG.debug("Current " + description + ": ");
988     for (FileStatus file : storeFiles) {
989       LOG.debug(file.getPath());
990     }
991   }
992 
993   /**
994    * Test that archiving a store file with the same name as one already in the archive causes the
995    * previously archived file to be moved to a timestamped backup.
996    */
997   @Test
998   public void testDuplicateHFileResolution() throws Exception {
999     String table = "table";
1000     HBaseTestingUtility htu = new HBaseTestingUtility();
1001     setRootDirAndCleanIt(htu, "testCleanParent");
1002     Server server = new MockServer(htu);
1003     MasterServices services = new MockMasterServices(server);
1004 
1005     // create the janitor
1006 
1007     CatalogJanitor janitor = new CatalogJanitor(server, services);
1008 
1009     // Create regions.
1010     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
1011     htd.addFamily(new HColumnDescriptor("f"));
1012     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
1013         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
1014     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
1015         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
1016     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
1017         Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
1018     // Test that when both daughter regions are in place, we do not
1019     // remove the parent.
1020     Result r = createResult(parent, splita, splitb);
1021 
1022     FileSystem fs = FileSystem.get(htu.getConfiguration());
1023 
1024     Path rootdir = services.getMasterFileSystem().getRootDir();
1025     // have to set the root directory since we use it in HFileDisposer to figure out how to get to the
1026     // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
1027     // the single test passes, but when the full suite is run, things get borked).
1028     FSUtils.setRootDir(fs.getConf(), rootdir);
1029     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
1030     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
1031     LOG.debug("Old root:" + rootdir);
1032     LOG.debug("Old table:" + tabledir);
1033     LOG.debug("Old store:" + storedir);
1034 
1035     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
1036       tabledir, htd.getColumnFamilies()[0].getName());
1037     LOG.debug("Old archive:" + storeArchive);
1038 
1039     // enable archiving, make sure that files get archived
1040     addMockStoreFiles(2, services, storedir);
1041     // get the current store files for comparison
1042     FileStatus[] storeFiles = fs.listStatus(storedir);
1043     // do the cleaning of the parent
1044     assertTrue(janitor.cleanParent(parent, r));
1045 
1046     // and now check to make sure that the files have actually been archived
1047     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
1048     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
1049 
1050     // now add store files with the same names as before to check backup
1051     // enable archiving, make sure that files get archived
1052     addMockStoreFiles(2, services, storedir);
1053 
1054     // do the cleaning of the parent
1055     assertTrue(janitor.cleanParent(parent, r));
1056 
1057     // and now check to make sure that the files have actually been archived
1058     archivedStoreFiles = fs.listStatus(storeArchive);
1059     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
1060 
1061     // cleanup
1062     services.stop("Test finished");
1063     server.stop("shutdown");
1064     janitor.cancel(true);
1065   }
1066 
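  /**
   * Create the given number of dummy store files under the store directory so
   * that cleanParent() has something to archive.
   */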
1067   private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
1068       throws IOException {
1069     // get the existing store files
1070     FileSystem fs = services.getMasterFileSystem().getFileSystem();
1071     fs.mkdirs(storedir);
1072     // create the store files in the parent
1073     for (int i = 0; i < count; i++) {
1074       Path storeFile = new Path(storedir, "_store" + i);
1075       FSDataOutputStream dos = fs.create(storeFile, true);
1076       dos.writeBytes("Some data: " + i);
1077       dos.close();
1078     }
1079     LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
1080     // make sure the mock store files are there
1081     FileStatus[] storeFiles = fs.listStatus(storedir);
1082     assertEquals("Didn't have expected store files", count, storeFiles.length);
1083     return storeFiles;
1084   }
1085 
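  /**
   * Point hbase.rootdir at a fresh test subdirectory, deleting any leftovers
   * from a previous run.
   */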
1086   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
1087       final String subdir)
1088   throws IOException {
1089     Path testdir = htu.getDataTestDir(subdir);
1090     FileSystem fs = FileSystem.get(htu.getConfiguration());
1091     if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
1092     FSUtils.setRootDir(htu.getConfiguration(), testdir);
1093     return FSUtils.getRootDir(htu.getConfiguration()).toString();
1094   }
1095 
1096   /**
1097    * @param services Master services instance.
1098    * @param htd Table descriptor for the regions' table.
1099    * @param parent Parent region the reference points back to.
1100    * @param daughter Daughter region whose store directory receives the reference.
1101    * @param midkey Split key to record in the reference.
1102    * @param top True if we are to write a 'top' reference.
1103    * @return Path to reference we created.
1104    * @throws IOException
1105    */
1106   private Path createReferences(final MasterServices services,
1107       final HTableDescriptor htd, final HRegionInfo parent,
1108       final HRegionInfo daughter, final byte [] midkey, final boolean top)
1109   throws IOException {
1110     Path rootdir = services.getMasterFileSystem().getRootDir();
1111     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
1112     Path storedir = HStore.getStoreHomedir(tabledir, daughter,
1113       htd.getColumnFamilies()[0].getName());
1114     Reference ref =
1115       top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
1116     long now = System.currentTimeMillis();
1117     // Reference name has this format: StoreFile#REF_NAME_PARSER
1118     Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
1119     FileSystem fs = services.getMasterFileSystem().getFileSystem();
1120     ref.write(fs, p);
1121     return p;
1122   }
1123 
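  /**
   * Build a mocked catalog row for the parent region with the two daughters
   * recorded as its splitA/splitB entries.
   */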
1124   private Result createResult(final HRegionInfo parent, final HRegionInfo a,
1125       final HRegionInfo b)
1126   throws IOException {
1127     return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
1128   }
1129 
1130   private HTableDescriptor createHTableDescriptor() {
1131     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
1132     htd.addFamily(new HColumnDescriptor("f"));
1133     return htd;
1134   }
1135 
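  /**
   * Answer a multi() call by acknowledging every action in the request with an
   * empty Result; this is all the mocked meta edits in these tests need.
   */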
1136   private MultiResponse buildMultiResponse(MultiRequest req) {
1137     MultiResponse.Builder builder = MultiResponse.newBuilder();
1138     RegionActionResult.Builder regionActionResultBuilder =
1139         RegionActionResult.newBuilder();
1140     ResultOrException.Builder roeBuilder = ResultOrException.newBuilder();
1141     for (RegionAction regionAction: req.getRegionActionList()) {
1142       regionActionResultBuilder.clear();
1143       for (ClientProtos.Action action: regionAction.getActionList()) {
1144         roeBuilder.clear();
1145         roeBuilder.setResult(ClientProtos.Result.getDefaultInstance());
1146         roeBuilder.setIndex(action.getIndex());
1147         regionActionResultBuilder.addResultOrException(roeBuilder.build());
1148       }
1149       builder.addRegionActionResult(regionActionResultBuilder.build());
1150     }
1151     return builder.build();
1152   }
1153 
1154 }
1155