1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase;
19
20 import java.io.File;
21 import java.io.IOException;
22 import java.io.OutputStream;
23 import java.lang.reflect.Field;
24 import java.lang.reflect.Modifier;
25 import java.net.BindException;
26 import java.net.DatagramSocket;
27 import java.net.InetAddress;
28 import java.net.ServerSocket;
29 import java.net.Socket;
30 import java.net.UnknownHostException;
31 import java.security.MessageDigest;
32 import java.util.ArrayList;
33 import java.util.Arrays;
34 import java.util.Collection;
35 import java.util.Collections;
36 import java.util.HashSet;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.NavigableSet;
40 import java.util.Properties;
41 import java.util.Random;
42 import java.util.Set;
43 import java.util.TreeSet;
44 import java.util.UUID;
45 import java.util.concurrent.TimeUnit;
46
47 import org.apache.commons.io.FileUtils;
48 import org.apache.commons.lang.RandomStringUtils;
49 import org.apache.commons.logging.Log;
50 import org.apache.commons.logging.LogFactory;
51 import org.apache.commons.logging.impl.Jdk14Logger;
52 import org.apache.commons.logging.impl.Log4JLogger;
53 import org.apache.hadoop.conf.Configuration;
54 import org.apache.hadoop.fs.FileSystem;
55 import org.apache.hadoop.fs.Path;
56 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
57 import org.apache.hadoop.hbase.Waiter.Predicate;
58 import org.apache.hadoop.hbase.backup.impl.BackupManager;
59 import org.apache.hadoop.hbase.classification.InterfaceAudience;
60 import org.apache.hadoop.hbase.classification.InterfaceStability;
61 import org.apache.hadoop.hbase.client.Admin;
62 import org.apache.hadoop.hbase.client.Connection;
63 import org.apache.hadoop.hbase.client.ConnectionFactory;
64 import org.apache.hadoop.hbase.client.Consistency;
65 import org.apache.hadoop.hbase.client.Delete;
66 import org.apache.hadoop.hbase.client.Durability;
67 import org.apache.hadoop.hbase.client.Get;
68 import org.apache.hadoop.hbase.client.HBaseAdmin;
69 import org.apache.hadoop.hbase.client.HConnection;
70 import org.apache.hadoop.hbase.client.HTable;
71 import org.apache.hadoop.hbase.client.Put;
72 import org.apache.hadoop.hbase.client.RegionLocator;
73 import org.apache.hadoop.hbase.client.Result;
74 import org.apache.hadoop.hbase.client.ResultScanner;
75 import org.apache.hadoop.hbase.client.Scan;
76 import org.apache.hadoop.hbase.client.Table;
77 import org.apache.hadoop.hbase.fs.HFileSystem;
78 import org.apache.hadoop.hbase.io.compress.Compression;
79 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
80 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
81 import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
82 import org.apache.hadoop.hbase.io.hfile.HFile;
83 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
84 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
85 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
86 import org.apache.hadoop.hbase.master.HMaster;
87 import org.apache.hadoop.hbase.master.RegionStates;
88 import org.apache.hadoop.hbase.master.ServerManager;
89 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
90 import org.apache.hadoop.hbase.regionserver.BloomType;
91 import org.apache.hadoop.hbase.regionserver.HRegion;
92 import org.apache.hadoop.hbase.regionserver.HRegionServer;
93 import org.apache.hadoop.hbase.regionserver.HStore;
94 import org.apache.hadoop.hbase.regionserver.InternalScanner;
95 import org.apache.hadoop.hbase.regionserver.Region;
96 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
97 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
98 import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
99 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
100 import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
101 import org.apache.hadoop.hbase.security.User;
102 import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
103 import org.apache.hadoop.hbase.tool.Canary;
104 import org.apache.hadoop.hbase.util.Bytes;
105 import org.apache.hadoop.hbase.util.FSTableDescriptors;
106 import org.apache.hadoop.hbase.util.FSUtils;
107 import org.apache.hadoop.hbase.util.JVMClusterUtil;
108 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
109 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
110 import org.apache.hadoop.hbase.util.Pair;
111 import org.apache.hadoop.hbase.util.RegionSplitter;
112 import org.apache.hadoop.hbase.util.RetryCounter;
113 import org.apache.hadoop.hbase.util.Threads;
114 import org.apache.hadoop.hbase.wal.WAL;
115 import org.apache.hadoop.hbase.wal.WALFactory;
116 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
117 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
118 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
119 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
120 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
121 import org.apache.hadoop.hdfs.DFSClient;
122 import org.apache.hadoop.hdfs.DistributedFileSystem;
123 import org.apache.hadoop.hdfs.MiniDFSCluster;
124 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
125 import org.apache.hadoop.mapred.JobConf;
126 import org.apache.hadoop.mapred.MiniMRCluster;
127 import org.apache.hadoop.mapred.TaskLog;
128 import org.apache.zookeeper.KeeperException;
129 import org.apache.zookeeper.KeeperException.NodeExistsException;
130 import org.apache.hadoop.minikdc.MiniKdc;
131 import org.apache.zookeeper.WatchedEvent;
132 import org.apache.zookeeper.ZooKeeper;
133 import org.apache.zookeeper.ZooKeeper.States;
134
135 import static org.junit.Assert.assertEquals;
136 import static org.junit.Assert.assertTrue;
137 import static org.junit.Assert.fail;
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153 @InterfaceAudience.Public
154 @InterfaceStability.Evolving
155 @SuppressWarnings("deprecation")
156 public class HBaseTestingUtility extends HBaseCommonTestingUtility {
  // Mini ZooKeeper cluster backing tests; null until started.
  private MiniZooKeeperCluster zkCluster = null;

  /** Config key: target number of regions per region server in tests. */
  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";

  /** Default value for {@link #REGIONS_PER_SERVER_KEY}. */
  public static final int DEFAULT_REGIONS_PER_SERVER = 5;

  // Set false when this utility starts its own ZK cluster (see startMiniZKCluster);
  // shutdownMiniCluster only stops ZK when this is false.
  private boolean passedZkCluster = false;
  private MiniDFSCluster dfsCluster = null;

  // Running HBase cluster, if any; volatile because start/stop happen across threads.
  private volatile HBaseCluster hbaseCluster = null;
  private MiniMRCluster mrCluster = null;

  // Guards against starting two mini clusters from the same utility instance.
  private volatile boolean miniClusterRunning;

  // Absolute path of the hadoop log dir, set by createDirsAndSetProperties().
  private String hadoopLogDir;

  // Local-FS directory holding mini-cluster data; lazily created by setupClusterTestDir().
  private File clusterTestDir = null;

  // Directory on the test filesystem for test data; lazily created, reset when
  // a new DFS cluster is started.
  private Path dataTestDirOnTestFS = null;

  // Shared cluster Connection; closed by shutdownMiniCluster().
  private volatile Connection connection;

  // Legacy property naming the test output directory; still consumed by Hadoop.
  @Deprecated
  private static final String TEST_DIRECTORY_KEY = "test.build.data";

  private static String FS_URI;

  // Ports already handed out within this JVM — presumably to avoid re-issuing
  // the same "random" port; usage not visible in this chunk (NOTE(review): confirm).
  private static final Set<Integer> takenRandomPorts = new HashSet<Integer>();
208
209
210 public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
211 Arrays.asList(new Object[][] {
212 { Compression.Algorithm.NONE },
213 { Compression.Algorithm.GZ }
214 });
215
216
217 public static final List<Object[]> BOOLEAN_PARAMETERIZED =
218 Arrays.asList(new Object[][] {
219 { new Boolean(false) },
220 { new Boolean(true) }
221 });
222
223
224 public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination() ;
225
226 public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
227 Compression.Algorithm.NONE, Compression.Algorithm.GZ
228 };
229
230
231
232
233
234
235
236 public static boolean available(int port) {
237 ServerSocket ss = null;
238 DatagramSocket ds = null;
239 try {
240 ss = new ServerSocket(port);
241 ss.setReuseAddress(true);
242 ds = new DatagramSocket(port);
243 ds.setReuseAddress(true);
244 return true;
245 } catch (IOException e) {
246
247 } finally {
248 if (ds != null) {
249 ds.close();
250 }
251
252 if (ss != null) {
253 try {
254 ss.close();
255 } catch (IOException e) {
256
257 }
258 }
259 }
260
261 return false;
262 }
263
264
265
266
267
268 private static List<Object[]> bloomAndCompressionCombinations() {
269 List<Object[]> configurations = new ArrayList<Object[]>();
270 for (Compression.Algorithm comprAlgo :
271 HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
272 for (BloomType bloomType : BloomType.values()) {
273 configurations.add(new Object[] { comprAlgo, bloomType });
274 }
275 }
276 return Collections.unmodifiableList(configurations);
277 }
278
279
280
281
282 private static List<Object[]> memStoreTSAndTagsCombination() {
283 List<Object[]> configurations = new ArrayList<Object[]>();
284 configurations.add(new Object[] { false, false });
285 configurations.add(new Object[] { false, true });
286 configurations.add(new Object[] { true, false });
287 configurations.add(new Object[] { true, true });
288 return Collections.unmodifiableList(configurations);
289 }
290
  /** All {compression, bloom type} pairs, for parameterized store tests. */
  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
      bloomAndCompressionCombinations();

  /** Creates a utility wrapping a fresh default HBase configuration. */
  public HBaseTestingUtility() {
    this(HBaseConfiguration.create());
  }

  /**
   * Creates a utility wrapping the given configuration.
   * @param conf configuration to use; the start/stop methods mutate it
   */
  public HBaseTestingUtility(Configuration conf) {
    super(conf);

    // Have checksum failures generate exceptions during tests (per the
    // ChecksumUtil test hook) instead of passing silently.
    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
  }
304
305
306
307
308
309
310
311 public static HBaseTestingUtility createLocalHTU() {
312 Configuration c = HBaseConfiguration.create();
313 return createLocalHTU(c);
314 }
315
316
317
318
319
320
321
322
  /**
   * Creates a testing utility for local-mode use: points hbase.rootdir at the
   * new utility's local data test directory.
   *
   * @param c configuration to wrap
   * @return a new local testing utility
   */
  public static HBaseTestingUtility createLocalHTU(Configuration c) {
    HBaseTestingUtility htu = new HBaseTestingUtility(c);
    String dataTestDir = htu.getDataTestDir().toString();
    htu.getConfiguration().set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting " + HConstants.HBASE_DIR + " to " + dataTestDir);
    return htu;
  }
330
331
332
333
334 public static void closeRegion(final Region r) throws IOException {
335 if (r != null) {
336 ((HRegion)r).close();
337 }
338 }
339
340
341
342
343
344
345
346
347
348
349
350
  /**
   * Returns this testing utility's configuration instance. Use this shared
   * instance rather than a cached copy: the start/stop methods mutate it.
   */
  @Override
  public Configuration getConfiguration() {
    return super.getConfiguration();
  }

  /** Injects an externally managed HBase cluster for this utility to operate on. */
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376 @Override
377 protected Path setupDataTestDir() {
378 Path testPath = super.setupDataTestDir();
379 if (null == testPath) {
380 return null;
381 }
382
383 createSubDirAndSystemProperty(
384 "hadoop.log.dir",
385 testPath, "hadoop-log-dir");
386
387
388
389 createSubDirAndSystemProperty(
390 "hadoop.tmp.dir",
391 testPath, "hadoop-tmp-dir");
392
393
394 createSubDir(
395 "mapreduce.cluster.local.dir",
396 testPath, "mapred-local-dir");
397
398 return testPath;
399 }
400
401 private void createSubDirAndSystemProperty(
402 String propertyName, Path parent, String subDirName){
403
404 String sysValue = System.getProperty(propertyName);
405
406 if (sysValue != null) {
407
408
409 LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
410 sysValue + " so I do NOT create it in " + parent);
411 String confValue = conf.get(propertyName);
412 if (confValue != null && !confValue.endsWith(sysValue)){
413 LOG.warn(
414 propertyName + " property value differs in configuration and system: "+
415 "Configuration="+confValue+" while System="+sysValue+
416 " Erasing configuration value by system value."
417 );
418 }
419 conf.set(propertyName, sysValue);
420 } else {
421
422 createSubDir(propertyName, parent, subDirName);
423 System.setProperty(propertyName, conf.get(propertyName));
424 }
425 }
426
427
428
429
430
431
432
  /**
   * @return the base directory ("test-data" under the test filesystem's
   *   working directory) beneath which per-test data dirs are created
   * @throws IOException if the test filesystem cannot be obtained
   */
  private Path getBaseTestDirOnTestFS() throws IOException {
    FileSystem fs = getTestFileSystem();
    return new Path(fs.getWorkingDirectory(), "test-data");
  }
437
438
439
440
  /**
   * @return descriptor of the hbase:meta table, loaded via FSTableDescriptors
   * @throws RuntimeException wrapping any IOException from the load
   */
  public HTableDescriptor getMetaTableDescriptor() {
    try {
      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
    } catch (IOException e) {
      throw new RuntimeException("Unable to create META table descriptor", e);
    }
  }
448
449
450
451
452
453
  /**
   * Lazily creates and returns the local directory backing mini-cluster
   * state.
   *
   * @return absolute path of the cluster test directory
   */
  Path getClusterTestDir() {
    if (clusterTestDir == null) {
      setupClusterTestDir();
    }
    return new Path(clusterTestDir.getAbsolutePath());
  }
460
461
462
463
464 private void setupClusterTestDir() {
465 if (clusterTestDir != null) {
466 return;
467 }
468
469
470
471 Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
472 clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
473
474 boolean b = deleteOnExit();
475 if (b) clusterTestDir.deleteOnExit();
476 conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
477 LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
478 }
479
480
481
482
483
484
485
  /**
   * Returns (lazily creating) a unique directory for test data on the test
   * filesystem.
   *
   * @return path of the data test directory on the test filesystem
   */
  public Path getDataTestDirOnTestFS() throws IOException {
    if (dataTestDirOnTestFS == null) {
      setupDataTestDirOnTestFS();
    }

    return dataTestDirOnTestFS;
  }
493
494
495
496
497
498
499
500
  /**
   * @param subdirName name of the child directory
   * @return path of {@code subdirName} under the data test dir on the test
   *   filesystem; the directory itself is not created by this call
   */
  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
    return new Path(getDataTestDirOnTestFS(), subdirName);
  }
504
505
506
507
508
509 private void setupDataTestDirOnTestFS() throws IOException {
510 if (dataTestDirOnTestFS != null) {
511 LOG.warn("Data test on test fs dir already setup in "
512 + dataTestDirOnTestFS.toString());
513 return;
514 }
515 dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
516 }
517
518
519
520
521 private Path getNewDataTestDirOnTestFS() throws IOException {
522
523
524
525
526 FileSystem fs = getTestFileSystem();
527 Path newDataTestDir = null;
528 if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
529 File dataTestDir = new File(getDataTestDir().toString());
530 if (deleteOnExit()) dataTestDir.deleteOnExit();
531 newDataTestDir = new Path(dataTestDir.getAbsolutePath());
532 } else {
533 Path base = getBaseTestDirOnTestFS();
534 String randomStr = UUID.randomUUID().toString();
535 newDataTestDir = new Path(base, randomStr);
536 if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
537 }
538 return newDataTestDir;
539 }
540
541
542
543
544
545
546 public boolean cleanupDataTestDirOnTestFS() throws IOException {
547 boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
548 if (ret)
549 dataTestDirOnTestFS = null;
550 return ret;
551 }
552
553
554
555
556
557
  /**
   * Recursively deletes a subdirectory of the data test dir on the test
   * filesystem.
   *
   * @param subdirName child directory to delete
   * @return true if the delete succeeded
   */
  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
    Path cpath = getDataTestDirOnTestFS(subdirName);
    return getTestFileSystem().delete(cpath, true);
  }
562
563
564
565
566
567
568
569
  /**
   * Starts a mini DFS cluster.
   *
   * @param servers number of datanodes
   * @return the started cluster
   */
  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
    return startMiniDFSCluster(servers, null);
  }
573
574
575
576
577
578
579
580
581
582
583
584
585 public MiniDFSCluster startMiniDFSCluster(final String hosts[])
586 throws Exception {
587 if ( hosts != null && hosts.length != 0) {
588 return startMiniDFSCluster(hosts.length, hosts);
589 } else {
590 return startMiniDFSCluster(1, null);
591 }
592 }
593
594
595
596
597
598
599
600
601
602
  /**
   * Starts a mini DFS cluster, points the default filesystem at it, and
   * blocks until the cluster is fully up.
   *
   * @param servers number of datanodes
   * @param hosts datanode hostnames, may be null
   * @return the started cluster
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
      throws Exception {
    createDirsAndSetProperties();
    // Skip fsync in the namenode edit log for speed; tests only.
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Quiet noisy metrics loggers during mini-cluster runs.
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class).
        setLevel(org.apache.log4j.Level.ERROR);
    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
        setLevel(org.apache.log4j.Level.ERROR);

    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
        true, null, null, hosts, null);

    // Point the default filesystem at the just-started cluster.
    setFs();

    // Wait for the cluster to be totally up.
    this.dfsCluster.waitClusterUp();

    // Reset the cached test dir so it is recreated on the new filesystem.
    dataTestDirOnTestFS = null;

    return this.dfsCluster;
  }
629
630 private void setFs() throws IOException {
631 if(this.dfsCluster == null){
632 LOG.info("Skipping setting fs because dfsCluster is null");
633 return;
634 }
635 FileSystem fs = this.dfsCluster.getFileSystem();
636 FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
637 }
638
639 public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
640 throws Exception {
641 createDirsAndSetProperties();
642 this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
643 true, null, racks, hosts, null);
644
645
646 FileSystem fs = this.dfsCluster.getFileSystem();
647 FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
648
649
650 this.dfsCluster.waitClusterUp();
651
652
653 dataTestDirOnTestFS = null;
654
655 return this.dfsCluster;
656 }
657
  /**
   * Starts a 5-datanode mini DFS cluster for WAL tests with the namenode on
   * {@code namenodePort}. Unlike the other start methods, this neither waits
   * for the cluster to come up nor resets the default filesystem.
   *
   * @param namenodePort port for the namenode
   * @return the started cluster
   */
  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
        null, null, null);
    return dfsCluster;
  }
664
665
  /**
   * Creates the directories Hadoop services need (cache, tmp, logs, mapred
   * local/temp), publishes them via system properties and the conf, enables
   * short-circuit reads when configured, and points all MapReduce scratch
   * locations beneath the test-FS data dir.
   */
  private void createDirsAndSetProperties() throws IOException {
    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
    createDirAndSetProperty("cache_data", "test.cache.data");
    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
    createDirAndSetProperty("mapred_local", "mapreduce.cluster.local.dir");
    createDirAndSetProperty("mapred_temp", "mapreduce.cluster.temp.dir");
    enableShortCircuit();

    Path root = getDataTestDirOnTestFS("hadoop");
    conf.set(MapreduceTestingShim.getMROutputDirProp(),
        new Path(root, "mapred-output-dir").toString());
    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
    conf.set("mapreduce.jobtracker.staging.root.dir",
        new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
  }
684
685
686
687
688
689
690
691 public boolean isReadShortCircuitOn(){
692 final String propName = "hbase.tests.use.shortcircuit.reads";
693 String readOnProp = System.getProperty(propName);
694 if (readOnProp != null){
695 return Boolean.parseBoolean(readOnProp);
696 } else {
697 return conf.getBoolean(propName, false);
698 }
699 }
700
701
702
703
704 private void enableShortCircuit() {
705 if (isReadShortCircuitOn()) {
706 String curUser = System.getProperty("user.name");
707 LOG.info("read short circuit is ON for user " + curUser);
708
709 conf.set("dfs.block.local-path-access.user", curUser);
710
711 conf.setBoolean("dfs.client.read.shortcircuit", true);
712
713 conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
714 } else {
715 LOG.info("read short circuit is OFF");
716 }
717 }
718
  /**
   * Creates {@code relPath} under the data test dir and records its path
   * under {@code property} in both the system properties and the conf.
   *
   * @param relPath directory name relative to the data test dir
   * @param property property name to publish the path under
   * @return the absolute path that was created and published
   */
  private String createDirAndSetProperty(final String relPath, String property) {
    String path = getDataTestDir(relPath).toString();
    System.setProperty(property, path);
    conf.set(property, path);
    // NOTE(review): mkdirs() result is ignored — a failure here would only
    // surface later when the dir is used; confirm this is intended.
    new File(path).mkdirs();
    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
    return path;
  }
727
728
729
730
731
732
  /**
   * Shuts down the mini DFS cluster if one is running, clears cached state,
   * and resets the default filesystem back to the local one.
   */
  public void shutdownMiniDFSCluster() throws IOException {
    if (this.dfsCluster != null) {
      this.dfsCluster.shutdown();
      dfsCluster = null;
      // Invalidate the test-FS data dir; it lived on the cluster just stopped.
      dataTestDirOnTestFS = null;
      FSUtils.setFsDefault(this.conf, new Path("file:///"));
    }
  }
742
743
744
745
746
747
748
749
  /**
   * Starts a single-server mini ZooKeeper cluster.
   *
   * @return the running cluster
   */
  public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
    return startMiniZKCluster(1);
  }
753
754
755
756
757
758
759
760
761
  /**
   * Starts a mini ZooKeeper cluster rooted in the cluster test dir.
   *
   * @param zooKeeperServerNum number of ZK servers to start
   * @param clientPortList optional explicit client ports
   * @return the running cluster
   */
  public MiniZooKeeperCluster startMiniZKCluster(
      final int zooKeeperServerNum,
      final int ... clientPortList)
      throws Exception {
    setupClusterTestDir();
    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList);
  }
769
  /** Starts a single ZK server in {@code dir} with no explicit client ports. */
  private MiniZooKeeperCluster startMiniZKCluster(final File dir)
      throws Exception {
    return startMiniZKCluster(dir, 1, null);
  }
774
775
776
777
778
779 private MiniZooKeeperCluster startMiniZKCluster(final File dir,
780 final int zooKeeperServerNum,
781 final int [] clientPortList)
782 throws Exception {
783 if (this.zkCluster != null) {
784 throw new IOException("Cluster already running at " + dir);
785 }
786 this.passedZkCluster = false;
787 this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
788 final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
789 if (defPort > 0){
790
791 this.zkCluster.setDefaultClientPort(defPort);
792 }
793
794 if (clientPortList != null) {
795
796 int clientPortListSize = (clientPortList.length <= zooKeeperServerNum) ?
797 clientPortList.length : zooKeeperServerNum;
798 for (int i=0; i < clientPortListSize; i++) {
799 this.zkCluster.addClientPort(clientPortList[i]);
800 }
801 }
802 int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
803 this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
804 Integer.toString(clientPort));
805 return this.zkCluster;
806 }
807
808
809
810
811
812
813
814 public void shutdownMiniZKCluster() throws IOException {
815 if (this.zkCluster != null) {
816 this.zkCluster.shutdown();
817 this.zkCluster = null;
818 }
819 }
820
821
822
823
824
825
826
  /** Starts a mini cluster with one master and one region server. */
  public MiniHBaseCluster startMiniCluster() throws Exception {
    return startMiniCluster(1, 1);
  }
830
831
832
833
834
835
836
  /**
   * Starts a mini cluster with one master, one region server and one
   * datanode.
   *
   * @param withWALDir whether to create a separate WAL root dir
   */
  public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
    return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);
  }
840
841
842
843
844
845
846
847
848
  /**
   * Starts a mini cluster with one master.
   *
   * @param numSlaves number of region servers
   * @param create whether to (re)create the hbase root dir
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(1, numSlaves, create);
  }
853
854
855
856
857
858
859
860
861
862
863
864
865
866
  /**
   * Starts a mini cluster with one master and {@code numSlaves} region
   * servers; the same count is used for datanodes.
   *
   * @param numSlaves number of region servers (and datanodes)
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves)
      throws Exception {
    return startMiniCluster(1, numSlaves, false);
  }

  /**
   * As {@link #startMiniCluster(int)} with control over root-dir creation and
   * a separate WAL dir.
   */
  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create, boolean withWALDir)
      throws Exception {
    return startMiniCluster(1, numSlaves, numSlaves, null, null, null, create, withWALDir);
  }
876
877
878
879
880
881
882
883
  /**
   * Starts a mini cluster.
   *
   * @param numMasters number of masters
   * @param numSlaves number of region servers (also used as datanode count)
   * @param create whether to (re)create the hbase root dir
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, create);
  }
889
890
891
892
893
894
895
  /**
   * Starts a mini cluster without recreating the root dir.
   *
   * @param numMasters number of masters
   * @param numSlaves number of region servers (also used as datanode count)
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, null, false);
  }

  /**
   * Starts a mini cluster with explicit datanode hostnames; one datanode per
   * region server unless hostnames dictate otherwise.
   *
   * @param dataNodeHosts datanode hostnames, may be null
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, boolean create)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null, create, false);
  }
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
  /**
   * Starts a mini cluster with explicit datanode hostnames and default
   * master/regionserver classes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numSlaves, dataNodeHosts,
        null, null);
  }

  /**
   * Starts a mini cluster with an explicit datanode count and default
   * master/regionserver classes.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final int numDataNodes) throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, null, null, null);
  }
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
  /**
   * Starts a mini cluster with custom master/regionserver implementations;
   * datanode count defaults to the slave count.
   *
   * @param masterClass master implementation, null for default
   * @param regionserverClass region server implementation, null for default
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, final String[] dataNodeHosts, Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(
        numMasters, numSlaves, numSlaves, dataNodeHosts, masterClass, regionserverClass);
  }

  /**
   * Starts a mini cluster with custom implementations and an explicit
   * datanode count; does not recreate the root dir or add a WAL dir.
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
      throws Exception {
    return startMiniCluster(numMasters, numSlaves, numDataNodes, dataNodeHosts,
        masterClass, regionserverClass, false, false);
  }
991
992
993
994
995
996
997
998
  /**
   * Starts the full mini cluster in order: DFS (unless one is already
   * running), ZK (unless one is already running), then HBase.
   *
   * @param numMasters number of HBase masters
   * @param numSlaves number of region servers
   * @param numDataNodes number of datanodes; overridden by the length of
   *   {@code dataNodeHosts} when that is non-empty
   * @param dataNodeHosts datanode hostnames, may be null
   * @param masterClass master implementation, null for default
   * @param regionserverClass region server implementation, null for default
   * @param create whether to (re)create the hbase root dir
   * @param withWALDir whether to create a separate WAL root dir
   * @return the running mini HBase cluster
   * @throws IllegalStateException if a mini cluster is already running
   */
  public MiniHBaseCluster startMiniCluster(final int numMasters,
      final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
      Class<? extends HMaster> masterClass,
      Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
      boolean create, boolean withWALDir)
      throws Exception {
    // Explicit hostnames win over the requested datanode count.
    if (dataNodeHosts != null && dataNodeHosts.length != 0) {
      numDataNodes = dataNodeHosts.length;
    }

    LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
        numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");

    // Guard against two concurrent clusters from the same utility.
    if (miniClusterRunning) {
      throw new IllegalStateException("A mini-cluster is already running");
    }
    miniClusterRunning = true;

    setupClusterTestDir();
    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());

    // Bring up DFS first unless the caller already started one.
    if (this.dfsCluster == null) {
      dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
    }

    // Then ZK, again only if not externally provided.
    if (this.zkCluster == null) {
      startMiniZKCluster(clusterTestDir);
    }

    // Finally HBase itself, on top of DFS and ZK.
    return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
        regionserverClass, create, withWALDir);
  }
1036
  /**
   * Starts a mini HBase cluster with default master/regionserver classes on
   * top of already-running DFS/ZK clusters.
   */
  public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
      throws IOException, InterruptedException {
    return startMiniHBaseCluster(numMasters, numSlaves, null, null, false, false);
  }
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055 public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
1056 final int numSlaves, Class<? extends HMaster> masterClass,
1057 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
1058 boolean create, boolean withWALDir)
1059 throws IOException, InterruptedException {
1060
1061 createRootDir(create);
1062
1063 if (withWALDir) {
1064 createWALRootDir();
1065 }
1066
1067
1068
1069 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
1070 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, numSlaves);
1071 }
1072 if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
1073 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
1074 }
1075
1076 Configuration c = new Configuration(this.conf);
1077 this.hbaseCluster =
1078 new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
1079
1080 Table t = new HTable(c, TableName.META_TABLE_NAME);
1081 ResultScanner s = t.getScanner(new Scan());
1082 while (s.next() != null) {
1083 continue;
1084 }
1085 s.close();
1086 t.close();
1087
1088 getHBaseAdmin();
1089 LOG.info("Minicluster is up");
1090
1091
1092
1093 setHBaseFsTmpDir();
1094
1095 return (MiniHBaseCluster)this.hbaseCluster;
1096 }
1097
1098
1099
1100
1101
1102
1103
1104 public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1105 this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
1106
1107 Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
1108 ResultScanner s = t.getScanner(new Scan());
1109 while (s.next() != null) {
1110
1111 }
1112 LOG.info("HBase has been restarted");
1113 s.close();
1114 t.close();
1115 }
1116
1117
1118
1119
1120
1121
1122 public MiniHBaseCluster getMiniHBaseCluster() {
1123 if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1124 return (MiniHBaseCluster)this.hbaseCluster;
1125 }
1126 throw new RuntimeException(hbaseCluster + " not an instance of " +
1127 MiniHBaseCluster.class.getName());
1128 }
1129
1130
1131
1132
1133
1134
  /**
   * Stops the mini cluster in order: the shared connection, HBase, ZK (only
   * when this utility started it), DFS, and finally removes the test dirs.
   */
  public void shutdownMiniCluster() throws Exception {
    LOG.info("Shutting down minicluster");
    if (this.connection != null && !this.connection.isClosed()) {
      this.connection.close();
      this.connection = null;
    }
    shutdownMiniHBaseCluster();
    // Only stop ZK if it was started by this utility, not passed in.
    if (!this.passedZkCluster) {
      shutdownMiniZKCluster();
    }
    shutdownMiniDFSCluster();

    cleanupTestDir();
    miniClusterRunning = false;
    LOG.info("Minicluster is down");
  }
1151
1152
1153
1154
1155
1156 @Override
1157 public boolean cleanupTestDir() throws IOException {
1158 boolean ret = super.cleanupTestDir();
1159 if (deleteDir(this.clusterTestDir)) {
1160 this.clusterTestDir = null;
1161 return ret & true;
1162 }
1163 return false;
1164 }
1165
1166
1167
1168
1169
  /**
   * Shuts down the HBase portion of the mini cluster: closes the cached
   * admin, stops the cluster (blocking until it is fully down), and closes
   * the ZK watcher. Also unsets the wait-on-regionservers bounds so a later
   * start can re-seed them.
   */
  public void shutdownMiniHBaseCluster() throws IOException {
    if (hbaseAdmin != null) {
      hbaseAdmin.close0();
      hbaseAdmin = null;
    }

    // Unset the configuration for MIN and MAX RS to start.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
      // Wait till hbase is down before going on to shut down zk.
      this.hbaseCluster.waitUntilShutDown();
      this.hbaseCluster = null;
    }

    if (zooKeeperWatcher != null) {
      zooKeeperWatcher.close();
      zooKeeperWatcher = null;
    }
  }
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200 public Path getDefaultRootDirPath(boolean create) throws IOException {
1201 if (!create) {
1202 return getDataTestDirOnTestFS();
1203 } else {
1204 return getNewDataTestDirOnTestFS();
1205 }
1206 }
1207
1208
1209
1210
1211
1212
1213
1214
  /**
   * Same as {@link #getDefaultRootDirPath(boolean)} with {@code create=false}:
   * returns the cached root dir path on the test filesystem.
   */
  public Path getDefaultRootDirPath() throws IOException {
    return getDefaultRootDirPath(false);
  }
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
  /**
   * Creates the hbase root dir on the test filesystem, records it as
   * hbase.rootdir in the conf, and writes the hbase version file into it.
   *
   * @param create true to provision a brand-new dir instead of the cached one
   * @return the root dir path
   */
  public Path createRootDir(boolean create) throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath(create);
    FSUtils.setRootDir(this.conf, hbaseRootdir);
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
1239
1240
1241
1242
1243
1244
1245
  /** Same as {@link #createRootDir(boolean)} with {@code create=false}. */
  public Path createRootDir() throws IOException {
    return createRootDir(false);
  }
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
  /**
   * Creates a separate WAL root dir on the test filesystem and records it as
   * the WAL root in the conf.
   *
   * @return the WAL root dir path
   */
  public Path createWALRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path walDir = getNewDataTestDirOnTestFS();
    FSUtils.setWALRootDir(this.conf, walDir);
    fs.mkdirs(walDir);
    return walDir;
  }
1266
1267 private void setHBaseFsTmpDir() throws IOException {
1268 String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1269 if (hbaseFsTmpDirInString == null) {
1270 this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
1271 LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1272 } else {
1273 LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1274 }
1275 }
1276
1277
1278
1279
1280
  /** Flushes all region caches in the mini cluster. */
  public void flush() throws IOException {
    getMiniHBaseCluster().flushcache();
  }

  /** Flushes the region caches of the given table. */
  public void flush(TableName tableName) throws IOException {
    getMiniHBaseCluster().flushcache(tableName);
  }

  /** Compacts all regions in the mini cluster; major compaction when requested. */
  public void compact(boolean major) throws IOException {
    getMiniHBaseCluster().compact(major);
  }

  /** Compacts all regions of the given table; major compaction when requested. */
  public void compact(TableName tableName, boolean major) throws IOException {
    getMiniHBaseCluster().compact(tableName, major);
  }
1308
1309
1310
1311
1312
1313
1314
1315
  /**
   * Creates a table with a single column family.
   *
   * @return a Table instance; close it when done
   */
  public Table createTable(TableName tableName, String family)
      throws IOException {
    return createTable(tableName, new String[]{family});
  }
1320
1321
1322
1323
1324
1325
1326
1327
1328 public HTable createTable(byte[] tableName, byte[] family)
1329 throws IOException{
1330 return createTable(TableName.valueOf(tableName), new byte[][]{family});
1331 }
1332
1333
1334
1335
1336
1337
1338
1339
1340 public Table createTable(TableName tableName, String[] families)
1341 throws IOException {
1342 List<byte[]> fams = new ArrayList<byte[]>(families.length);
1343 for (String family : families) {
1344 fams.add(Bytes.toBytes(family));
1345 }
1346 return createTable(tableName, fams.toArray(new byte[0][]));
1347 }
1348
1349
1350
1351
1352
1353
1354
1355
1356 public HTable createTable(TableName tableName, byte[] family)
1357 throws IOException{
1358 return createTable(tableName, new byte[][]{family});
1359 }
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369 public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1370 throws IOException {
1371 if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1372 byte[] startKey = Bytes.toBytes("aaaaa");
1373 byte[] endKey = Bytes.toBytes("zzzzz");
1374 byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1375
1376 return createTable(tableName, new byte[][] { family }, splitKeys);
1377 }
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387 public HTable createTable(byte[] tableName, byte[][] families)
1388 throws IOException {
1389 return createTable(tableName, families,
1390 new Configuration(getConfiguration()));
1391 }
1392
1393
1394
1395
1396
1397
1398
1399
1400 public HTable createTable(TableName tableName, byte[][] families)
1401 throws IOException {
1402 return createTable(tableName, families, (byte[][]) null);
1403 }
1404
1405
1406
1407
1408
1409
1410
1411
1412 public HTable createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1413 return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1414 }
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424 public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1425 throws IOException {
1426 return createTable(tableName, families, splitKeys, new Configuration(getConfiguration()));
1427 }
1428
1429 public HTable createTable(byte[] tableName, byte[][] families,
1430 int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1431 return createTable(TableName.valueOf(tableName), families, numVersions,
1432 startKey, endKey, numRegions);
1433 }
1434
1435 public HTable createTable(String tableName, byte[][] families,
1436 int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
1437 return createTable(TableName.valueOf(tableName), families, numVersions,
1438 startKey, endKey, numRegions);
1439 }
1440
1441 public HTable createTable(TableName tableName, byte[][] families,
1442 int numVersions, byte[] startKey, byte[] endKey, int numRegions)
1443 throws IOException{
1444 HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1445
1446 getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
1447
1448 waitUntilAllRegionsAssigned(tableName);
1449 return new HTable(getConfiguration(), tableName);
1450 }
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460 public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
1461 throws IOException {
1462 return createTable(htd, families, (byte[][]) null, c);
1463 }
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
/**
 * Creates a table from the given descriptor, adding one column family per element of
 * {@code families} (bloom filters disabled), then blocks until all regions are assigned.
 *
 * NOTE(review): the {@code c} parameter is never read in this method — the returned
 * HTable comes from {@link #getConnection()}, not from a connection built on {@code c}.
 * Confirm callers do not rely on {@code c} being honored.
 *
 * @param htd table descriptor; families are added to it in place
 * @param families column family names to add
 * @param splitKeys initial region split points, or null for a single region
 * @param c configuration (currently unused; see note above)
 * @return an HTable for the newly created table
 * @throws IOException if table creation fails
 */
public HTable createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys,
    Configuration c) throws IOException {
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // Disable blooms (they are on by default) so reads exercise every block.
    hcd.setBloomFilterType(BloomType.NONE);
    htd.addFamily(hcd);
  }
  getHBaseAdmin().createTable(htd, splitKeys);
  // HBaseAdmin only waits for regions to come online; wait for assignment as well.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return (HTable) getConnection().getTable(htd.getTableName());
}
1490
1491
1492
1493
1494
1495
1496
1497
1498 public HTable createTable(HTableDescriptor htd, byte[][] splitRows)
1499 throws IOException {
1500 getHBaseAdmin().createTable(htd, splitRows);
1501
1502 waitUntilAllRegionsAssigned(htd.getTableName());
1503 return new HTable(getConfiguration(), htd.getTableName());
1504 }
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514 public HTable createTable(TableName tableName, byte[][] families,
1515 final Configuration c)
1516 throws IOException {
1517 return createTable(tableName, families, (byte[][]) null, c);
1518 }
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529 public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1530 final Configuration c) throws IOException {
1531 return createTable(new HTableDescriptor(tableName), families, splitKeys, c);
1532 }
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542 public HTable createTable(byte[] tableName, byte[][] families,
1543 final Configuration c)
1544 throws IOException {
1545 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1546 for(byte[] family : families) {
1547 HColumnDescriptor hcd = new HColumnDescriptor(family);
1548
1549
1550
1551 hcd.setBloomFilterType(BloomType.NONE);
1552 desc.addFamily(hcd);
1553 }
1554 getHBaseAdmin().createTable(desc);
1555 return new HTable(c, desc.getTableName());
1556 }
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567 public HTable createTable(TableName tableName, byte[][] families,
1568 final Configuration c, int numVersions)
1569 throws IOException {
1570 HTableDescriptor desc = new HTableDescriptor(tableName);
1571 for(byte[] family : families) {
1572 HColumnDescriptor hcd = new HColumnDescriptor(family)
1573 .setMaxVersions(numVersions);
1574 desc.addFamily(hcd);
1575 }
1576 getHBaseAdmin().createTable(desc);
1577
1578 waitUntilAllRegionsAssigned(tableName);
1579 return new HTable(c, tableName);
1580 }
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591 public HTable createTable(byte[] tableName, byte[][] families,
1592 final Configuration c, int numVersions)
1593 throws IOException {
1594 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1595 for(byte[] family : families) {
1596 HColumnDescriptor hcd = new HColumnDescriptor(family)
1597 .setMaxVersions(numVersions);
1598 desc.addFamily(hcd);
1599 }
1600 getHBaseAdmin().createTable(desc);
1601 return new HTable(c, desc.getTableName());
1602 }
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612 public HTable createTable(byte[] tableName, byte[] family, int numVersions)
1613 throws IOException {
1614 return createTable(tableName, new byte[][]{family}, numVersions);
1615 }
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625 public HTable createTable(TableName tableName, byte[] family, int numVersions)
1626 throws IOException {
1627 return createTable(tableName, new byte[][]{family}, numVersions);
1628 }
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638 public HTable createTable(byte[] tableName, byte[][] families,
1639 int numVersions)
1640 throws IOException {
1641 return createTable(TableName.valueOf(tableName), families, numVersions);
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 public HTable createTable(TableName tableName, byte[][] families,
1653 int numVersions)
1654 throws IOException {
1655 return createTable(tableName, families, numVersions, (byte[][]) null);
1656 }
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 public HTable createTable(TableName tableName, byte[][] families, int numVersions,
1668 byte[][] splitKeys) throws IOException {
1669 HTableDescriptor desc = new HTableDescriptor(tableName);
1670 for (byte[] family : families) {
1671 HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1672 desc.addFamily(hcd);
1673 }
1674 getHBaseAdmin().createTable(desc, splitKeys);
1675
1676 waitUntilAllRegionsAssigned(tableName);
1677 return new HTable(new Configuration(getConfiguration()), tableName);
1678 }
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688 public HTable createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1689 throws IOException {
1690 return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1691 }
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702 public HTable createTable(byte[] tableName, byte[][] families,
1703 int numVersions, int blockSize) throws IOException {
1704 return createTable(TableName.valueOf(tableName),
1705 families, numVersions, blockSize);
1706 }
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717 public HTable createTable(TableName tableName, byte[][] families,
1718 int numVersions, int blockSize) throws IOException {
1719 HTableDescriptor desc = new HTableDescriptor(tableName);
1720 for (byte[] family : families) {
1721 HColumnDescriptor hcd = new HColumnDescriptor(family)
1722 .setMaxVersions(numVersions)
1723 .setBlocksize(blockSize);
1724 desc.addFamily(hcd);
1725 }
1726 getHBaseAdmin().createTable(desc);
1727
1728 waitUntilAllRegionsAssigned(tableName);
1729 return new HTable(new Configuration(getConfiguration()), tableName);
1730 }
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740 public HTable createTable(byte[] tableName, byte[][] families,
1741 int[] numVersions)
1742 throws IOException {
1743 return createTable(TableName.valueOf(tableName), families, numVersions);
1744 }
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754 public HTable createTable(TableName tableName, byte[][] families,
1755 int[] numVersions)
1756 throws IOException {
1757 HTableDescriptor desc = new HTableDescriptor(tableName);
1758 int i = 0;
1759 for (byte[] family : families) {
1760 HColumnDescriptor hcd = new HColumnDescriptor(family)
1761 .setMaxVersions(numVersions[i]);
1762 desc.addFamily(hcd);
1763 i++;
1764 }
1765 getHBaseAdmin().createTable(desc);
1766
1767 waitUntilAllRegionsAssigned(tableName);
1768 return new HTable(new Configuration(getConfiguration()), tableName);
1769 }
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779 public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
1780 throws IOException{
1781 return createTable(TableName.valueOf(tableName), family, splitRows);
1782 }
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792 public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
1793 throws IOException {
1794 HTableDescriptor desc = new HTableDescriptor(tableName);
1795 HColumnDescriptor hcd = new HColumnDescriptor(family);
1796 desc.addFamily(hcd);
1797 getHBaseAdmin().createTable(desc, splitRows);
1798
1799 waitUntilAllRegionsAssigned(tableName);
1800 return new HTable(getConfiguration(), tableName);
1801 }
1802
1803
1804
1805
1806
1807
1808
1809
1810 public HTable createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1811 return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1812 }
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822 public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows)
1823 throws IOException {
1824 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
1825 for(byte[] family:families) {
1826 HColumnDescriptor hcd = new HColumnDescriptor(family);
1827 desc.addFamily(hcd);
1828 }
1829 getHBaseAdmin().createTable(desc, splitRows);
1830
1831 waitUntilAllRegionsAssigned(desc.getTableName());
1832 return new HTable(getConfiguration(), desc.getTableName());
1833 }
1834
1835
1836
1837
/**
 * Creates a WAL for the given region under {@code walRootDir}. The factory is handed a
 * private copy of {@code conf} with hbase.rootdir pointed at {@code rootDir}, so the
 * caller's configuration is never mutated.
 *
 * @param conf base configuration (copied, not modified)
 * @param rootDir value written into {@link HConstants#HBASE_DIR} of the private conf
 * @param walRootDir root directory for WAL files
 * @param hri region the WAL serves; its encoded name scopes the log
 * @return a new WAL instance
 * @throws IOException if the WAL factory cannot create the log
 */
public static WAL createWal(final Configuration conf, final Path rootDir, final Path walRootDir, final HRegionInfo hri)
    throws IOException {
  // Copy the configuration before touching rootdir settings so the caller's conf
  // is left untouched.
  Configuration confForWAL = new Configuration(conf);
  confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
  FSUtils.setWALRootDir(confForWAL, walRootDir);
  // Random suffix keeps concurrently created factories from colliding on log names.
  return (new WALFactory(confForWAL,
      Collections.<WALActionsListener>singletonList(new MetricsWAL()),
      "hregion-" + RandomStringUtils.randomNumeric(8))).
      getWAL(hri.getEncodedNameAsBytes());
}
1850
1851
1852
1853
1854
1855 public static HRegion createRegionAndWAL(final HRegionInfo info, final Path rootDir,
1856 final Path walRootDir, final Configuration conf, final HTableDescriptor htd) throws IOException {
1857 return createRegionAndWAL(info, rootDir, walRootDir, conf, htd, true);
1858 }
1859
1860
1861
1862
1863
1864 public static HRegion createRegionAndWAL(final HRegionInfo info, final Path rootDir,
1865 final Path walRootDir, final Configuration conf, final HTableDescriptor htd, boolean initialize)
1866 throws IOException {
1867 WAL wal = createWal(conf, rootDir, walRootDir, info);
1868 return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
1869 }
1870
1871
1872
1873
1874 public static void closeRegionAndWAL(final Region r) throws IOException {
1875 closeRegionAndWAL((HRegion)r);
1876 }
1877
1878
1879
1880
1881 public static void closeRegionAndWAL(final HRegion r) throws IOException {
1882 if (r == null) return;
1883 r.close();
1884 if (r.getWAL() == null) return;
1885 r.getWAL().close();
1886 }
1887
1888
1889
1890
/**
 * Issues a modifyTable and then polls {@link Admin#getAlterStatus} until every region has
 * picked up the new descriptor, sleeping one second between polls and giving up after 500
 * attempts (~500 seconds).
 *
 * @param admin admin to issue the modification through
 * @param desc new descriptor; its table name selects the table to modify
 * @throws IOException if the alter has not completed within the retry budget
 * @throws InterruptedException if interrupted while sleeping between polls
 */
@SuppressWarnings("serial")
public static void modifyTableSync(Admin admin, HTableDescriptor desc)
    throws IOException, InterruptedException {
  admin.modifyTable(desc.getTableName(), desc);
  // Pair of (regions still to be updated, total regions) — see the debug log below.
  // Seeded with zeros; real values come from the first getAlterStatus() call.
  Pair<Integer, Integer> status = new Pair<Integer, Integer>() {{
    setFirst(0);
    setSecond(0);
  }};
  int i = 0;
  do {
    status = admin.getAlterStatus(desc.getTableName());
    if (status.getSecond() != 0) {
      LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
          + " regions updated.");
      Thread.sleep(1 * 1000l);
    } else {
      // getSecond() == 0 means no alter in progress: all regions already updated.
      LOG.debug("All regions updated.");
      break;
    }
  } while (status.getFirst() != 0 && i++ < 500);
  if (status.getFirst() != 0) {
    throw new IOException("Failed to update all regions even after 500 seconds.");
  }
}
1915
1916
1917
1918
1919 public static void setReplicas(Admin admin, TableName table, int replicaCount)
1920 throws IOException, InterruptedException {
1921 admin.disableTable(table);
1922 HTableDescriptor desc = admin.getTableDescriptor(table);
1923 desc.setRegionReplication(replicaCount);
1924 admin.modifyTable(desc.getTableName(), desc);
1925 admin.enableTable(table);
1926 }
1927
1928
1929
1930
1931
1932 public void deleteTable(String tableName) throws IOException {
1933 deleteTable(TableName.valueOf(tableName));
1934 }
1935
1936
1937
1938
1939
1940 public void deleteTable(byte[] tableName) throws IOException {
1941 deleteTable(TableName.valueOf(tableName));
1942 }
1943
1944
1945
1946
1947
1948 public void deleteTable(TableName tableName) throws IOException {
1949 try {
1950 getHBaseAdmin().disableTable(tableName);
1951 } catch (TableNotEnabledException e) {
1952 LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1953 }
1954 getHBaseAdmin().deleteTable(tableName);
1955 }
1956
1957
1958
1959
1960
1961 public void deleteTableIfAny(TableName tableName) throws IOException {
1962 try {
1963 deleteTable(tableName);
1964 } catch (TableNotFoundException e) {
1965
1966 }
1967 }
1968
1969
1970
1971
1972
// Default column-family names used by the createTableDescriptor(String, ...) helpers.
public final static byte [] fam1 = Bytes.toBytes("colfamily11");
public final static byte [] fam2 = Bytes.toBytes("colfamily21");
public final static byte [] fam3 = Bytes.toBytes("colfamily31");
// The three default families bundled as an array, in declaration order.
public static final byte[][] COLUMNS = {fam1, fam2, fam3};
// Max versions applied by createTableDescriptor(String) when none is specified.
private static final int MAXVERSIONS = 3;

// Row-key alphabet used by ROWS and the load/verify helpers: 'a' through 'z'.
public static final char FIRST_CHAR = 'a';
public static final char LAST_CHAR = 'z';
public static final byte [] START_KEY_BYTES = {FIRST_CHAR, FIRST_CHAR, FIRST_CHAR};
public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1983
1984
1985
1986
1987
1988
1989
1990
1991 public HTableDescriptor createTableDescriptor(final String name,
1992 final int minVersions, final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1993 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
1994 for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
1995 htd.addFamily(new HColumnDescriptor(cfName)
1996 .setMinVersions(minVersions)
1997 .setMaxVersions(versions)
1998 .setKeepDeletedCells(keepDeleted)
1999 .setBlockCacheEnabled(false)
2000 .setTimeToLive(ttl)
2001 );
2002 }
2003 return htd;
2004 }
2005
2006
2007
2008
2009
2010
2011
2012 public HTableDescriptor createTableDescriptor(final String name) {
2013 return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS,
2014 MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
2015 }
2016
2017
2018
2019
2020
2021 public HRegion createHRegion(
2022 final HRegionInfo info,
2023 final Path rootDir,
2024 final Configuration conf,
2025 final HTableDescriptor htd) throws IOException {
2026 return HRegion.createHRegion(info, rootDir, conf, htd);
2027 }
2028
2029 public HTableDescriptor createTableDescriptor(final TableName tableName,
2030 byte[] family) {
2031 return createTableDescriptor(tableName, new byte[][] {family}, 1);
2032 }
2033
2034 public HTableDescriptor createTableDescriptor(final TableName tableName,
2035 byte[][] families, int maxVersions) {
2036 HTableDescriptor desc = new HTableDescriptor(tableName);
2037 for (byte[] family : families) {
2038 HColumnDescriptor hcd = new HColumnDescriptor(family)
2039 .setMaxVersions(maxVersions);
2040 desc.addFamily(hcd);
2041 }
2042 return desc;
2043 }
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053 public HRegion createLocalHRegion(HTableDescriptor desc, byte [] startKey,
2054 byte [] endKey)
2055 throws IOException {
2056 HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
2057 return createLocalHRegion(hri, desc);
2058 }
2059
2060
2061
2062
2063
2064
2065
2066
2067 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException {
2068 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc);
2069 }
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079 public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc, WAL wal)
2080 throws IOException {
2081 return HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), desc, wal);
2082 }
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096 public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
2097 String callingMethod, Configuration conf, boolean isReadOnly, Durability durability,
2098 WAL wal, byte[]... families) throws IOException {
2099 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
2100 htd.setReadOnly(isReadOnly);
2101 for (byte[] family : families) {
2102 HColumnDescriptor hcd = new HColumnDescriptor(family);
2103
2104 hcd.setMaxVersions(Integer.MAX_VALUE);
2105 htd.addFamily(hcd);
2106 }
2107 htd.setDurability(durability);
2108 HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
2109 return createLocalHRegion(info, htd, wal);
2110 }
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121 public HTable deleteTableData(byte[] tableName) throws IOException {
2122 return deleteTableData(TableName.valueOf(tableName));
2123 }
2124
2125
2126
2127
2128
2129
2130
2131
2132 public HTable deleteTableData(TableName tableName) throws IOException {
2133 HTable table = new HTable(getConfiguration(), tableName);
2134 Scan scan = new Scan();
2135 ResultScanner resScan = table.getScanner(scan);
2136 for(Result res : resScan) {
2137 Delete del = new Delete(res.getRow());
2138 table.delete(del);
2139 }
2140 resScan = table.getScanner(scan);
2141 resScan.close();
2142 return table;
2143 }
2144
2145
2146
2147
2148
2149
2150
2151
2152 public HTable truncateTable(final TableName tableName, final boolean preserveRegions)
2153 throws IOException {
2154 Admin admin = getHBaseAdmin();
2155 admin.truncateTable(tableName, preserveRegions);
2156 return new HTable(getConfiguration(), tableName);
2157 }
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168 public HTable truncateTable(final TableName tableName) throws IOException {
2169 return truncateTable(tableName, false);
2170 }
2171
2172
2173
2174
2175
2176
2177
2178
2179 public HTable truncateTable(final byte[] tableName, final boolean preserveRegions)
2180 throws IOException {
2181 return truncateTable(TableName.valueOf(tableName), preserveRegions);
2182 }
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193 public HTable truncateTable(final byte[] tableName) throws IOException {
2194 return truncateTable(tableName, false);
2195 }
2196
2197
2198
2199
2200
2201
2202
2203
2204 public int loadTable(final Table t, final byte[] f) throws IOException {
2205 return loadTable(t, new byte[][] {f});
2206 }
2207
2208
2209
2210
2211
2212
2213
2214
2215 public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2216 return loadTable(t, new byte[][] {f}, null, writeToWAL);
2217 }
2218
2219
2220
2221
2222
2223
2224
2225
2226 public int loadTable(final Table t, final byte[][] f) throws IOException {
2227 return loadTable(t, f, null);
2228 }
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238 public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2239 return loadTable(t, f, value, true);
2240 }
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250 public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
2251 List<Put> puts = new ArrayList<>();
2252 for (byte[] row : HBaseTestingUtility.ROWS) {
2253 Put put = new Put(row);
2254 put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2255 for (int i = 0; i < f.length; i++) {
2256 put.add(f[i], f[i], value != null ? value : row);
2257 }
2258 puts.add(put);
2259 }
2260 t.put(puts);
2261 return puts.size();
2262 }
2263
2264
2265
2266
/**
 * Tracks how often each three-letter row key ('aaa'..'zzz', see {@code ROWS}) has been
 * observed, so scan tests can assert that every row in [startRow, stopRow) was returned
 * exactly once and that no row outside the range appeared.
 */
public static class SeenRowTracker {
  // One counter cell per possible three-character 'a'..'z' row key.
  int dim = 'z' - 'a' + 1;
  int[][][] seenRows = new int[dim][dim][dim];
  // Inclusive lower bound used by validate().
  byte[] startRow;
  // Exclusive upper bound used by validate().
  byte[] stopRow;

  public SeenRowTracker(byte[] startRow, byte[] stopRow) {
    this.startRow = startRow;
    this.stopRow = stopRow;
  }

  /** Zeroes every counter; call when reusing a tracker across scans. */
  void reset() {
    for (byte[] row : ROWS) {
      seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
    }
  }

  // Maps a row byte ('a'..'z') onto an array index (0..25).
  int i(byte b) {
    return b - 'a';
  }

  /** Records one sighting of {@code row}; the first three bytes index the counter. */
  public void addRow(byte[] row) {
    seenRows[i(row[0])][i(row[1])][i(row[2])]++;
  }

  /**
   * Validates that every row inside [startRow, stopRow) was seen exactly once and rows
   * outside the range never; throws RuntimeException on the first mismatch.
   */
  public void validate() {
    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
          int count = seenRows[i(b1)][i(b2)][i(b3)];
          int expectedCount = 0;
          if (Bytes.compareTo(new byte[] {b1,b2,b3}, startRow) >= 0
              && Bytes.compareTo(new byte[] {b1,b2,b3}, stopRow) < 0) {
            expectedCount = 1;
          }
          if (count != expectedCount) {
            String row = new String(new byte[] {b1,b2,b3});
            throw new RuntimeException("Row:" + row + " has a seen count of " + count + " instead of " + expectedCount);
          }
        }
      }
    }
  }
}
2314
2315 public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2316 return loadRegion(r, f, false);
2317 }
2318
2319 public int loadRegion(final Region r, final byte[] f) throws IOException {
2320 return loadRegion((HRegion)r, f);
2321 }
2322
2323
2324
2325
2326
2327
2328
2329
2330
/**
 * Loads every three-letter row key 'aaa'..'zzz' into region {@code r} under family
 * {@code f} (null qualifier, row key as value), retrying an individual put with
 * exponential backoff whenever the region reports itself too busy.
 *
 * @param r region to load
 * @param f column family to write into
 * @param flush when true, flush the region after each first-letter batch of rows
 * @return the number of rows written (26^3)
 * @throws IOException on a non-retriable write failure
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
    throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        // Bulk test data: skip the WAL for speed.
        put.setDurability(Durability.SKIP_WAL);
        put.add(f, null, k);
        if (r.getWAL() == null) {
          put.setDurability(Durability.SKIP_WAL);
        }
        int preRowCount = rowCount;
        int pause = 10;       // initial retry backoff passed to Threads.sleep
        int maxPause = 1000;  // backoff cap
        // Retry until this particular put lands; double the backoff (up to the cap)
        // each time the region is too busy (e.g. blocked on memstore pressure).
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      r.flush(true);
    }
  }
  return rowCount;
}
2367
2368 public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2369 throws IOException {
2370 for (int i = startRow; i < endRow; i++) {
2371 byte[] data = Bytes.toBytes(String.valueOf(i));
2372 Put put = new Put(data);
2373 put.add(f, null, data);
2374 t.put(put);
2375 }
2376 }
2377
2378 public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2379 int replicaId)
2380 throws IOException {
2381 for (int i = startRow; i < endRow; i++) {
2382 String failMsg = "Failed verification of row :" + i;
2383 byte[] data = Bytes.toBytes(String.valueOf(i));
2384 Get get = new Get(data);
2385 get.setReplicaId(replicaId);
2386 get.setConsistency(Consistency.TIMELINE);
2387 Result result = table.get(get);
2388 assertTrue(failMsg, result.containsColumn(f, null));
2389 assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
2390 Cell cell = result.getColumnLatestCell(f, null);
2391 assertTrue(failMsg,
2392 Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2393 cell.getValueLength()));
2394 }
2395 }
2396
2397 public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2398 throws IOException {
2399 verifyNumericRows((HRegion)region, f, startRow, endRow);
2400 }
2401
2402 public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2403 throws IOException {
2404 verifyNumericRows(region, f, startRow, endRow, true);
2405 }
2406
2407 public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2408 final boolean present) throws IOException {
2409 verifyNumericRows((HRegion)region, f, startRow, endRow, present);
2410 }
2411
2412 public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2413 final boolean present) throws IOException {
2414 for (int i = startRow; i < endRow; i++) {
2415 String failMsg = "Failed verification of row :" + i;
2416 byte[] data = Bytes.toBytes(String.valueOf(i));
2417 Result result = region.get(new Get(data));
2418
2419 boolean hasResult = result != null && !result.isEmpty();
2420 assertEquals(failMsg + result, present, hasResult);
2421 if (!present) continue;
2422
2423 assertTrue(failMsg, result.containsColumn(f, null));
2424 assertEquals(failMsg, result.getColumnCells(f, null).size(), 1);
2425 Cell cell = result.getColumnLatestCell(f, null);
2426 assertTrue(failMsg,
2427 Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
2428 cell.getValueLength()));
2429 }
2430 }
2431
2432 public void deleteNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
2433 throws IOException {
2434 for (int i = startRow; i < endRow; i++) {
2435 byte[] data = Bytes.toBytes(String.valueOf(i));
2436 Delete delete = new Delete(data);
2437 delete.deleteFamily(f);
2438 t.delete(delete);
2439 }
2440 }
2441
2442
2443
2444
2445 public int countRows(final Table table) throws IOException {
2446 Scan scan = new Scan();
2447 ResultScanner results = table.getScanner(scan);
2448 int count = 0;
2449 for (@SuppressWarnings("unused") Result res : results) {
2450 count++;
2451 }
2452 results.close();
2453 return count;
2454 }
2455
2456 public int countRows(final Table table, final Scan scan) throws IOException {
2457 ResultScanner results = table.getScanner(scan);
2458 int count = 0;
2459 for (@SuppressWarnings("unused") Result res : results) {
2460 count++;
2461 }
2462 results.close();
2463 return count;
2464 }
2465
2466 public int countRows(final Table table, final byte[]... families) throws IOException {
2467 Scan scan = new Scan();
2468 for (byte[] family: families) {
2469 scan.addFamily(family);
2470 }
2471 ResultScanner results = table.getScanner(scan);
2472 int count = 0;
2473 for (@SuppressWarnings("unused") Result res : results) {
2474 count++;
2475 }
2476 results.close();
2477 return count;
2478 }
2479
2480
2481
2482
2483 public int countRows(final TableName tableName) throws IOException {
2484 Table table = getConnection().getTable(tableName);
2485 try {
2486 return countRows(table);
2487 } finally {
2488 table.close();
2489 }
2490 }
2491
2492
2493
2494
2495 public String checksumRows(final Table table) throws Exception {
2496 Scan scan = new Scan();
2497 ResultScanner results = table.getScanner(scan);
2498 MessageDigest digest = MessageDigest.getInstance("MD5");
2499 for (Result res : results) {
2500 digest.update(res.getRow());
2501 }
2502 results.close();
2503 return digest.toString();
2504 }
2505
2506
/** All 26^3 three-letter row keys 'aaa'..'zzz', filled below in lexicographic order. */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3];
static {
  // Enumerate every combination of three lowercase letters.
  int i = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        ROWS[i][0] = b1;
        ROWS[i][1] = b2;
        ROWS[i][2] = b3;
        i++;
      }
    }
  }
}
2521
// Standard split keys starting with the empty key: empty, "bbb".."yyy" (no "aaa"/"zzz").
public static final byte[][] KEYS = {
  HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
  Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
  Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
  Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
  Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
  Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
  Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
  Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
  Bytes.toBytes("xxx"), Bytes.toBytes("yyy")
};
2533
// Split keys "bbb".."zzz" used by the createMultiRegionTable helpers (no empty key,
// since Admin.createTable rejects an explicit empty split key).
public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = {
  Bytes.toBytes("bbb"),
  Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
  Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
  Bytes.toBytes("iii"), Bytes.toBytes("jjj"), Bytes.toBytes("kkk"),
  Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
  Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
  Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"),
  Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("www"),
  Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
};
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
/**
 * Writes region rows directly into hbase:meta for the given table — one region per entry
 * of {@code startKeys} (sorted in place first). Each region's end key is the next start
 * key; because the index wraps ({@code (i + 1) % length}), the last region's end key is
 * the smallest start key. NOTE(review): confirm callers include
 * HConstants.EMPTY_BYTE_ARRAY in startKeys so the first/last regions cover the whole key
 * space. No regions are created or assigned — only meta rows are written.
 *
 * @param conf configuration used to reach hbase:meta
 * @param htd descriptor of the table the regions belong to
 * @param startKeys region start keys; this array is sorted in place
 * @return the HRegionInfos that were written to meta
 * @throws IOException if meta cannot be updated
 */
public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
    final HTableDescriptor htd, byte [][] startKeys)
    throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
  for (int i = 0; i < startKeys.length; i++) {
    // End key of region i is the start key of region i+1 (wrapping at the end).
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
        startKeys[j]);
    MetaTableAccessor.addRegionToMeta(meta, hri);
    newRegions.add(hri);
  }
  meta.close();
  return newRegions;
}
2574
2575
2576
2577
2578
2579
2580 public List<byte[]> getMetaTableRows() throws IOException {
2581
2582 Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2583 List<byte[]> rows = new ArrayList<byte[]>();
2584 ResultScanner s = t.getScanner(new Scan());
2585 for (Result result : s) {
2586 LOG.info("getMetaTableRows: row -> " +
2587 Bytes.toStringBinary(result.getRow()));
2588 rows.add(result.getRow());
2589 }
2590 s.close();
2591 t.close();
2592 return rows;
2593 }
2594
2595
2596
2597
2598
2599
2600 public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2601
2602 Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
2603 List<byte[]> rows = new ArrayList<byte[]>();
2604 ResultScanner s = t.getScanner(new Scan());
2605 for (Result result : s) {
2606 HRegionInfo info = HRegionInfo.getHRegionInfo(result);
2607 if (info == null) {
2608 LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2609
2610 continue;
2611 }
2612
2613 if (info.getTable().equals(tableName)) {
2614 LOG.info("getMetaTableRows: row -> " +
2615 Bytes.toStringBinary(result.getRow()) + info);
2616 rows.add(result.getRow());
2617 }
2618 }
2619 s.close();
2620 t.close();
2621 return rows;
2622 }
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635 public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2636 throws IOException, InterruptedException {
2637 List<byte[]> metaRows = getMetaTableRows(tableName);
2638 if (metaRows == null || metaRows.isEmpty()) {
2639 return null;
2640 }
2641 LOG.debug("Found " + metaRows.size() + " rows for table " +
2642 tableName);
2643 byte [] firstrow = metaRows.get(0);
2644 LOG.debug("FirstRow=" + Bytes.toString(firstrow));
2645 long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2646 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2647 int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2648 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2649 RetryCounter retrier = new RetryCounter(numRetries+1, (int)pause, TimeUnit.MICROSECONDS);
2650 while(retrier.shouldRetry()) {
2651 int index = getMiniHBaseCluster().getServerWith(firstrow);
2652 if (index != -1) {
2653 return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2654 }
2655
2656 retrier.sleepUntilNextRetry();
2657 }
2658 return null;
2659 }
2660
2661
2662
2663
2664
2665
2666
2667 public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2668 startMiniMapReduceCluster(2);
2669 return mrCluster;
2670 }
2671
2672
2673
2674
2675
2676 private void forceChangeTaskLogDir() {
2677 Field logDirField;
2678 try {
2679 logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2680 logDirField.setAccessible(true);
2681
2682 Field modifiersField = Field.class.getDeclaredField("modifiers");
2683 modifiersField.setAccessible(true);
2684 modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2685
2686 logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2687 } catch (SecurityException e) {
2688 throw new RuntimeException(e);
2689 } catch (NoSuchFieldException e) {
2690
2691 throw new RuntimeException(e);
2692 } catch (IllegalArgumentException e) {
2693 throw new RuntimeException(e);
2694 } catch (IllegalAccessException e) {
2695 throw new RuntimeException(e);
2696 }
2697 }
2698
2699
2700
2701
2702
2703
2704
  /**
   * Start a MiniMRCluster with the given number of servers, then copy the
   * addresses the mini cluster generated back into this utility's
   * configuration so jobs submitted by tests reach it.
   *
   * @param servers number of task trackers / node managers to start
   * @throws IllegalStateException if a MR cluster is already running
   * @throws IOException if cluster startup fails
   */
  private void startMiniMapReduceCluster(final int servers) throws IOException {
    if (mrCluster != null) {
      throw new IllegalStateException("MiniMRCluster is already running");
    }
    LOG.info("Starting mini mapreduce cluster...");
    setupClusterTestDir();
    createDirsAndSetProperties();

    forceChangeTaskLogDir();

    // Allow generous virtual memory relative to physical so the mini
    // cluster's containers are not killed by YARN's vmem check.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // Speculative execution only wastes resources on a single host.
    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);

    // Generous task memory so test jobs do not fail on memory limits.
    conf.setInt("mapreduce.map.memory.mb", 2048);
    conf.setInt("mapreduce.reduce.memory.mb", 2048);

    // Run MR against the test filesystem, or an externally supplied FS URI.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
    }

    // MiniMRCluster rewrites this property; restore our local dir setting.
    jobConf.set("mapreduce.cluster.local.dir",
      conf.get("mapreduce.cluster.local.dir"));
    LOG.info("Mini mapreduce cluster started");

    // Propagate the generated endpoints into this.conf so client-side job
    // submissions from tests land on the mini cluster rather than "local".
    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
    // YARN-specific wiring for MR2.
    conf.set("mapreduce.framework.name", "yarn");
    conf.setBoolean("yarn.is.minicluster", true);
    String rmAddress = jobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
    }
    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
    if (historyAddress != null) {
      conf.set("mapreduce.jobhistory.address", historyAddress);
    }
    String schedulerAddress =
      jobConf.get("yarn.resourcemanager.scheduler.address");
    if (schedulerAddress != null) {
      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
    }
  }
2763
2764
2765
2766
2767 public void shutdownMiniMapReduceCluster() {
2768 if (mrCluster != null) {
2769 LOG.info("Stopping mini mapreduce cluster...");
2770 mrCluster.shutdown();
2771 mrCluster = null;
2772 LOG.info("Mini mapreduce cluster stopped");
2773 }
2774
2775 conf.set("mapreduce.jobtracker.address", "local");
2776 }
2777
2778
2779
2780
2781 public RegionServerServices createMockRegionServerService() throws IOException {
2782 return createMockRegionServerService((ServerName)null);
2783 }
2784
2785
2786
2787
2788
2789 public RegionServerServices createMockRegionServerService(RpcServerInterface rpc) throws IOException {
2790 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2791 rss.setFileSystem(getTestFileSystem());
2792 rss.setRpcServer(rpc);
2793 return rss;
2794 }
2795
2796
2797
2798
2799
2800 public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2801 final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2802 rss.setFileSystem(getTestFileSystem());
2803 return rss;
2804 }
2805
2806
2807
2808
2809
2810
2811 public void enableDebug(Class<?> clazz) {
2812 Log l = LogFactory.getLog(clazz);
2813 if (l instanceof Log4JLogger) {
2814 ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
2815 } else if (l instanceof Jdk14Logger) {
2816 ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
2817 }
2818 }
2819
2820
2821
2822
2823
2824 public void expireMasterSession() throws Exception {
2825 HMaster master = getMiniHBaseCluster().getMaster();
2826 expireSession(master.getZooKeeper(), false);
2827 }
2828
2829
2830
2831
2832
2833
2834 public void expireRegionServerSession(int index) throws Exception {
2835 HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2836 expireSession(rs.getZooKeeper(), false);
2837 decrementMinRegionServerCount();
2838 }
2839
2840 private void decrementMinRegionServerCount() {
2841
2842
2843 decrementMinRegionServerCount(getConfiguration());
2844
2845
2846 for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2847 decrementMinRegionServerCount(master.getMaster().getConfiguration());
2848 }
2849 }
2850
2851 private void decrementMinRegionServerCount(Configuration conf) {
2852 int currentCount = conf.getInt(
2853 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2854 if (currentCount != -1) {
2855 conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
2856 Math.max(currentCount - 1, 1));
2857 }
2858 }
2859
2860 public void expireSession(ZooKeeperWatcher nodeZK) throws Exception {
2861 expireSession(nodeZK, false);
2862 }
2863
  /**
   * Expire the given watcher's zookeeper session.
   * @deprecated the {@code server} argument is ignored; use
   *   {@link #expireSession(ZooKeeperWatcher)} instead.
   */
  @Deprecated
  public void expireSession(ZooKeeperWatcher nodeZK, Server server)
    throws Exception {
    expireSession(nodeZK, false);
  }
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
  /**
   * Expire a ZooKeeper session by the standard trick: open a second client
   * connection with the same session id and password and close it, which
   * makes the server expire the session; the original client then sees a
   * session-expired event.
   *
   * @param nodeZK watcher whose session is to be expired
   * @param checkStatus if true, verify the cluster still serves hbase:meta
   *   afterwards by opening (and closing) a table handle
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    // Steal the live session's credentials so we can impersonate it.
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // A "monitor" connection on the same session is opened first, with a
    // watcher that only logs events. NOTE(review): presumably this guards
    // against a race in the close-based expiry (see ZOOKEEPER-1105);
    // confirm before removing.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // Second connection with the same session id; closing it expires the
    // session server-side.
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        1000, EmptyWatcher.instance, sessionID, password);

    // Wait (up to 1s) for the impersonating connection to come up before
    // closing it, so the close actually reaches the server.
    long start = System.currentTimeMillis();
    while (newZK.getState() != States.CONNECTED
         && System.currentTimeMillis() - start < 1000) {
       Thread.sleep(1);
    }
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Session is gone; release the monitor connection too.
    monitor.close();

    if (checkStatus) {
      // Sanity check: the cluster should still serve meta after the expiry.
      new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close();
    }
  }
2925
2926
2927
2928
2929
2930
2931
2932 public MiniHBaseCluster getHBaseCluster() {
2933 return getMiniHBaseCluster();
2934 }
2935
2936
2937
2938
2939
2940
2941
2942
2943
  /**
   * Returns the HBaseCluster instance.
   * <p>The returned object can be any subclass of HBaseCluster (mini or
   * distributed); tests using this accessor must not downcast. Tests that
   * only work against a mini cluster should use
   * {@code getMiniHBaseCluster()} instead.
   */
  public HBaseCluster getHBaseClusterInterface() {
    // Deliberately returns the abstract-typed field, not the mini cluster.
    return hbaseCluster;
  }
2949
2950
2951
2952
2953
2954
2955
  /**
   * Get the shared Connection to the cluster, creating it on first use.
   * The utility owns the connection; callers must not close it.
   * NOTE(review): unlike getHBaseAdmin(), this lazy init is not
   * synchronized — concurrent first calls could create two connections.
   * Confirm callers are single-threaded before relying on this.
   * @throws IOException if connection creation fails
   */
  public Connection getConnection() throws IOException {
    if (this.connection == null) {
      this.connection = ConnectionFactory.createConnection(this.conf);
    }
    return this.connection;
  }
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972 public synchronized HBaseAdmin getHBaseAdmin()
2973 throws IOException {
2974 if (hbaseAdmin == null){
2975 this.hbaseAdmin = new HBaseAdminForTests(getConnection());
2976 }
2977 return hbaseAdmin;
2978 }
2979
  // Lazily created by getHBaseAdmin(); shared across the whole test run.
  private HBaseAdminForTests hbaseAdmin = null;
  /**
   * Admin wrapper handed out by {@link #getHBaseAdmin()} whose close() only
   * logs a warning: the shared instance must stay usable for the full test
   * lifetime. The private close0() performs the real close; it is invoked
   * by the owning utility (caller not visible in this chunk — presumably
   * during shutdown; confirm before removing).
   */
  private static class HBaseAdminForTests extends HBaseAdmin {
    public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
        ZooKeeperConnectionException, IOException {
      super(connection);
    }

    @Override
    public synchronized void close() throws IOException {
      // Deliberate no-op plus warning; see class comment.
      LOG.warn("close() called on HBaseAdmin instance returned from " +
        "HBaseTestingUtility.getHBaseAdmin()");
    }

    // Real close, for the owning utility only.
    private synchronized void close0() throws IOException {
      super.close();
    }
  }
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
  /**
   * Lazily create and cache a shared ZooKeeperWatcher for this utility.
   * Its Abortable throws a RuntimeException on abort, so zookeeper faults
   * surface directly as test failures instead of being swallowed.
   * @return the shared watcher
   * @throws IOException if the watcher cannot be created
   */
  public synchronized ZooKeeperWatcher getZooKeeperWatcher()
    throws IOException {
    if (zooKeeperWatcher == null) {
      zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility",
          new Abortable() {
        @Override public void abort(String why, Throwable e) {
          throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
        }
        @Override public boolean isAborted() {return false;}
      });
    }
    return zooKeeperWatcher;
  }
  // Cached by getZooKeeperWatcher(); non-null after first successful call.
  private ZooKeeperWatcher zooKeeperWatcher;
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030 public void closeRegion(String regionName) throws IOException {
3031 closeRegion(Bytes.toBytes(regionName));
3032 }
3033
3034
3035
3036
3037
3038
3039
3040 public void closeRegion(byte[] regionName) throws IOException {
3041 getHBaseAdmin().closeRegion(regionName, null);
3042 }
3043
3044
3045
3046
3047
3048
3049
3050
3051 public void closeRegionByRow(String row, RegionLocator table) throws IOException {
3052 closeRegionByRow(Bytes.toBytes(row), table);
3053 }
3054
3055
3056
3057
3058
3059
3060
3061
3062 public void closeRegionByRow(byte[] row, RegionLocator table) throws IOException {
3063 HRegionLocation hrl = table.getRegionLocation(row);
3064 closeRegion(hrl.getRegionInfo().getRegionName());
3065 }
3066
3067
3068
3069
3070
3071
3072
3073
3074 public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
3075 List<HRegion> regions = getHBaseCluster().getRegions(tableName);
3076 int regCount = regions.size();
3077 Set<Integer> attempted = new HashSet<Integer>();
3078 int idx;
3079 int attempts = 0;
3080 do {
3081 regions = getHBaseCluster().getRegions(tableName);
3082 if (regCount != regions.size()) {
3083
3084 attempted.clear();
3085 }
3086 regCount = regions.size();
3087
3088
3089 if (regCount > 0) {
3090 idx = random.nextInt(regCount);
3091
3092 if (attempted.contains(idx))
3093 continue;
3094 try {
3095 regions.get(idx).checkSplit();
3096 return regions.get(idx);
3097 } catch (Exception ex) {
3098 LOG.warn("Caught exception", ex);
3099 attempted.add(idx);
3100 }
3101 }
3102 attempts++;
3103 } while (maxAttempts == -1 || attempts < maxAttempts);
3104 return null;
3105 }
3106
  /** @return the zookeeper cluster in use (mini or externally supplied). */
  public MiniZooKeeperCluster getZkCluster() {
    return zkCluster;
  }
3110
  /**
   * Use an externally managed zookeeper cluster. Marks it as passed-in so
   * this utility will not shut it down, and points the client-port config
   * at it.
   * @param zkCluster cluster to use
   */
  public void setZkCluster(MiniZooKeeperCluster zkCluster) {
    this.passedZkCluster = true;
    this.zkCluster = zkCluster;
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
  }
3116
  /** @return the mini DFS cluster in use, or null if none has been set. */
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }
3120
3121 public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
3122 setDFSCluster(cluster, true);
3123 }
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133 public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3134 throws IllegalStateException, IOException {
3135 if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3136 throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3137 }
3138 this.dfsCluster = cluster;
3139 this.setFs();
3140 }
3141
  /** @return an HFileSystem over this utility's configuration. */
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
3145
3146
3147
3148
3149
3150
3151
3152
  /**
   * Wait (up to 30 seconds) for the table to become available.
   * NOTE(review): this delegates to the byte[] overload, which polls the
   * Admin directly, while waitTableAvailable(TableName, long) uses the
   * predicateTableAvailable() wait — two different checks behind the same
   * name. Confirm whether they should be unified.
   */
  public void waitTableAvailable(TableName table)
      throws InterruptedException, IOException {
    waitTableAvailable(table.getName(), 30000);
  }
3157
3158 public void waitTableAvailable(TableName table, long timeoutMillis)
3159 throws InterruptedException, IOException {
3160 waitFor(timeoutMillis, predicateTableAvailable(table));
3161 }
3162
3163 public String explainTableAvailability(TableName tableName) throws IOException {
3164 String msg = explainTableState(tableName) + ",";
3165 if (getHBaseCluster().getMaster().isAlive()) {
3166 Map<HRegionInfo, ServerName> assignments =
3167 getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
3168 .getRegionAssignments();
3169 final List<Pair<HRegionInfo, ServerName>> metaLocations =
3170 MetaTableAccessor
3171 .getTableRegionsAndLocations(getZooKeeperWatcher(), connection, tableName);
3172 for (Pair<HRegionInfo, ServerName> metaLocation : metaLocations) {
3173 HRegionInfo hri = metaLocation.getFirst();
3174 ServerName sn = metaLocation.getSecond();
3175 if (!assignments.containsKey(hri)) {
3176 msg += ", region " + hri
3177 + " not assigned, but found in meta, it expected to be on " + sn;
3178
3179 } else if (sn == null) {
3180 msg += ", region " + hri
3181 + " assigned, but has no server in meta";
3182 } else if (!sn.equals(assignments.get(hri))) {
3183 msg += ", region " + hri
3184 + " assigned, but has different servers in meta and AM ( " +
3185 sn + " <> " + assignments.get(hri);
3186 }
3187 }
3188 }
3189 return msg;
3190 }
3191
3192 public String explainTableState(TableName tableName) throws IOException {
3193 try {
3194 if (getHBaseAdmin().isTableEnabled(tableName))
3195 return "table enabled in zk";
3196 else if (getHBaseAdmin().isTableDisabled(tableName))
3197 return "table disabled in zk";
3198 else
3199 return "table in uknown state";
3200 } catch (TableNotFoundException e) {
3201 return "table not exists";
3202 }
3203 }
3204
3205
3206
3207
3208
3209
3210
3211
3212 public void waitTableAvailable(byte[] table, long timeoutMillis)
3213 throws InterruptedException, IOException {
3214 waitTableAvailable(getHBaseAdmin(), table, timeoutMillis);
3215 }
3216
3217 public void waitTableAvailable(Admin admin, byte[] table, long timeoutMillis)
3218 throws InterruptedException, IOException {
3219 long startWait = System.currentTimeMillis();
3220 while (!admin.isTableAvailable(TableName.valueOf(table))) {
3221 assertTrue("Timed out waiting for table to become available " +
3222 Bytes.toStringBinary(table),
3223 System.currentTimeMillis() - startWait < timeoutMillis);
3224 Thread.sleep(200);
3225 }
3226 }
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237 public void waitTableEnabled(TableName table)
3238 throws InterruptedException, IOException {
3239 waitTableEnabled(table, 30000);
3240 }
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251 public void waitTableEnabled(byte[] table, long timeoutMillis)
3252 throws InterruptedException, IOException {
3253 waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3254 }
3255
3256 public void waitTableEnabled(TableName table, long timeoutMillis)
3257 throws IOException {
3258 waitFor(timeoutMillis, predicateTableEnabled(table));
3259 }
3260
3261
3262
3263
3264
3265
3266
3267
3268 public void waitTableDisabled(byte[] table)
3269 throws InterruptedException, IOException {
3270 waitTableDisabled(getHBaseAdmin(), table, 30000);
3271 }
3272
3273 public void waitTableDisabled(Admin admin, byte[] table)
3274 throws InterruptedException, IOException {
3275 waitTableDisabled(admin, table, 30000);
3276 }
3277
3278
3279
3280
3281
3282
3283
3284
3285 public void waitTableDisabled(byte[] table, long timeoutMillis)
3286 throws InterruptedException, IOException {
3287 waitTableDisabled(getHBaseAdmin(), table, timeoutMillis);
3288 }
3289
3290 public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis)
3291 throws InterruptedException, IOException {
3292 TableName tableName = TableName.valueOf(table);
3293 long startWait = System.currentTimeMillis();
3294 while (!admin.isTableDisabled(tableName)) {
3295 assertTrue("Timed out waiting for table to become disabled " +
3296 Bytes.toStringBinary(table),
3297 System.currentTimeMillis() - startWait < timeoutMillis);
3298 Thread.sleep(200);
3299 }
3300 }
3301
3302
3303
3304
3305
3306
3307
3308
3309 public boolean ensureSomeRegionServersAvailable(final int num)
3310 throws IOException {
3311 boolean startedServer = false;
3312 MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3313 for (int i=hbaseCluster.getLiveRegionServerThreads().size(); i<num; ++i) {
3314 LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3315 startedServer = true;
3316 }
3317
3318 return startedServer;
3319 }
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
  /**
   * Make sure that at least {@code num} region servers that are neither
   * stopped nor stopping are running, starting new ones as needed.
   * @param num minimum number of usable region servers
   * @return true if at least one new region server was started
   */
  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num)
      throws IOException {
    // First top up the raw live-thread count...
    boolean startedServer = ensureSomeRegionServersAvailable(num);

    // ...then count only servers that are actually usable.
    int nonStoppedServers = 0;
    for (JVMClusterUtil.RegionServerThread rst :
      getMiniHBaseCluster().getRegionServerThreads()) {

      HRegionServer hrs = rst.getRegionServer();
      if (hrs.isStopping() || hrs.isStopped()) {
        LOG.info("A region server is stopped or stopping:"+hrs);
      } else {
        nonStoppedServers++;
      }
    }
    // Start additional servers to cover the stopped/stopping ones.
    for (int i=nonStoppedServers; i<num; ++i) {
      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
      startedServer = true;
    }
    return startedServer;
  }
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362 public static User getDifferentUser(final Configuration c,
3363 final String differentiatingSuffix)
3364 throws IOException {
3365 FileSystem currentfs = FileSystem.get(c);
3366 if (!(currentfs instanceof DistributedFileSystem)) {
3367 return User.getCurrent();
3368 }
3369
3370
3371 String username = User.getCurrent().getName() +
3372 differentiatingSuffix;
3373 User user = User.createUserForTesting(c, username,
3374 new String[]{"supergroup"});
3375 return user;
3376 }
3377
  /**
   * Collect the names of every region currently online on the cluster's
   * live region servers and masters.
   * @param cluster cluster to inspect
   * @return sorted set of region names as strings
   */
  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
      throws IOException {
    NavigableSet<String> online = new TreeSet<String>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // Deliberately ignored: the server went down mid-query; its regions
        // are no longer "online" anyway.
      }
    }
    for (MasterThread mt : cluster.getLiveMasterThreads()) {
      try {
        for (HRegionInfo region :
            ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
          online.add(region.getRegionNameAsString());
        }
      } catch (RegionServerStoppedException e) {
        // Deliberately ignored: master stopped mid-query.
      } catch (ServerNotRunningYetException e) {
        // Deliberately ignored: master not fully up; it carries no regions.
      }
    }
    return online;
  }
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
  /**
   * Set maxRecoveryErrorCount on a DFSOutputStream via reflection. The field
   * is not publicly settable; lowering it lets recovery-related tests fail
   * fast instead of retrying at length. Best effort: any failure is logged
   * and ignored.
   * @param stream stream expected to be a DFSClient.DFSOutputStream
   * @param max new maximum recovery error count
   */
  public static void setMaxRecoveryErrorCount(final OutputStream stream,
      final int max) {
    try {
      // DFSOutputStream is a non-public inner class of DFSClient; locate it
      // by simple name among DFSClient's declared classes.
      Class<?> [] clazzes = DFSClient.class.getDeclaredClasses();
      for (Class<?> clazz: clazzes) {
        String className = clazz.getSimpleName();
        if (className.equals("DFSOutputStream")) {
          if (clazz.isInstance(stream)) {
            Field maxRecoveryErrorCountField =
              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
            maxRecoveryErrorCountField.setAccessible(true);
            maxRecoveryErrorCountField.setInt(stream, max);
            break;
          }
        }
      }
    } catch (Exception e) {
      // Best effort only; the field may not exist on this Hadoop version.
      LOG.info("Could not set max recovery field", e);
    }
  }
3439
3440
3441
3442
3443
3444
3445
3446
3447
  /**
   * Move the region to the destination server and wait until the
   * AssignmentManager reports it there.
   * NOTE(review): there is no timeout — if the move never completes this
   * loops forever; callers rely on test-level timeouts.
   * @param destRegion region to move
   * @param destServer server to move it to
   */
  public void moveRegionAndWait(HRegionInfo destRegion, ServerName destServer)
      throws InterruptedException, IOException {
    HMaster master = getMiniHBaseCluster().getMaster();
    getHBaseAdmin().move(destRegion.getEncodedNameAsBytes(),
        Bytes.toBytes(destServer.getServerName()));
    while (true) {
      ServerName serverName = master.getAssignmentManager().getRegionStates()
          .getRegionServerOfRegion(destRegion);
      if (serverName != null && serverName.equals(destServer)) {
        // Double-check the region is actually open on that server.
        assertRegionOnServer(destRegion, serverName, 200);
        break;
      }
      Thread.sleep(10);
    }
  }
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473 public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3474 waitUntilAllRegionsAssigned(
3475 tableName,
3476 this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3477 }
3478
3479
3480
3481
3482
3483 public void waitUntilAllSystemRegionsAssigned() throws IOException {
3484 waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3485 waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3486 if(BackupManager.isBackupEnabled(conf)){
3487 waitUntilAllRegionsAssigned(TableName.BACKUP_TABLE_NAME);
3488 }
3489 }
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
  /**
   * Wait until every region of the table has a server in hbase:meta, then
   * (for non-distributed clusters) until the master's AssignmentManager also
   * knows regions for the table.
   * @param tableName table to wait on
   * @param timeout timeout in milliseconds, applied to each phase
   * @throws IOException on scan failure or wait timeout
   */
  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
      throws IOException {
    final Table meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME);
    try {
      waitFor(timeout, 200, true, new Predicate<IOException>() {
        @Override
        public boolean evaluate() throws IOException {
          boolean allRegionsAssigned = true;
          Scan scan = new Scan();
          scan.addFamily(HConstants.CATALOG_FAMILY);
          ResultScanner s = meta.getScanner(scan);
          try {
            Result r;
            while ((r = s.next()) != null) {
              byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
              HRegionInfo info = HRegionInfo.parseFromOrNull(b);
              if (info != null && info.getTable().equals(tableName)) {
                // A region counts as assigned once its meta row carries a
                // server column.
                b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                allRegionsAssigned &= (b != null);
              }
            }
          } finally {
            s.close();
          }
          return allRegionsAssigned;
        }
      });
    } finally {
      meta.close();
    }

    // Meta can be ahead of the master's in-memory state; for local clusters
    // also wait for the AssignmentManager to know regions of the table.
    if (!getHBaseClusterInterface().isDistributedCluster()) {
      HMaster master = getHBaseCluster().getMaster();
      final RegionStates states = master.getAssignmentManager().getRegionStates();
      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
        @Override
        public String explainFailure() throws IOException {
          return explainTableAvailability(tableName);
        }

        @Override
        public boolean evaluate() throws IOException {
          List<HRegionInfo> hris = states.getRegionsOfTable(tableName);
          return hris != null && !hris.isEmpty();
        }
      });
    }
  }
3551
3552
3553
3554
3555
  /**
   * Do a small get against a single store; HStore exposes no direct query
   * method, so this goes through its scanner.
   * @param store store to read from
   * @param get get to run (converted to a single-row scan)
   * @return cells for the requested row, or an empty list if the row is
   *   absent
   */
  public static List<Cell> getFromStoreFile(HStore store,
                                Get get) throws IOException {
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
        scan.getFamilyMap().get(store.getFamily().getName()),
        // NOTE(review): 0 is passed as the scanner's read point — confirm
        // that is the intended MVCC read point for these test reads.
        0);

    List<Cell> result = new ArrayList<Cell>();
    scanner.next(result);
    if (!result.isEmpty()) {
      // A store scan can return the following row's cells when the
      // requested row is absent; verify the row actually matches.
      Cell kv = result.get(0);
      if (!CellUtil.matchingRow(kv, get.getRow())) {
        result.clear();
      }
    }
    scanner.close();
    return result;
  }
3577
3578
3579
3580
3581
3582
3583
3584
3585
  /**
   * Create region split start keys between startKey and endKey: the output
   * of Bytes.split shifted one slot to the right, with an empty byte array
   * prepended as the first region's start key.
   * @param startKey first split boundary
   * @param endKey last split boundary
   * @param numRegions total number of regions; asserted to be &gt; 3
   * @return array of start keys, with result[0] == EMPTY_BYTE_ARRAY
   */
  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions){
    assertTrue(numRegions>3);
    byte [][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte [][] result = new byte[tmpSplitKeys.length+1][];
    // Shift right and install the empty start key at position 0.
    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
    result[0] = HConstants.EMPTY_BYTE_ARRAY;
    return result;
  }
3594
3595
3596
3597
3598
3599 public static List<Cell> getFromStoreFile(HStore store,
3600 byte [] row,
3601 NavigableSet<byte[]> columns
3602 ) throws IOException {
3603 Get get = new Get(row);
3604 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3605 s.put(store.getFamily().getName(), columns);
3606
3607 return getFromStoreFile(store,get);
3608 }
3609
3610
3611
3612
3613
  /**
   * Create a new ZooKeeperWatcher named "unittest" against the given
   * utility's configuration. Its Abortable records the abort and throws, so
   * zookeeper faults fail the test loudly.
   * @param TEST_UTIL utility supplying the configuration
   * @return a new watcher; the caller is responsible for closing it
   */
  public static ZooKeeperWatcher getZooKeeperWatcher(
      HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
      IOException {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "unittest", new Abortable() {
          boolean aborted = false;

          @Override
          public void abort(String why, Throwable e) {
            aborted = true;
            throw new RuntimeException("Fatal ZK error, why=" + why, e);
          }

          @Override
          public boolean isAborted() {
            return aborted;
          }
        });
    return zkw;
  }
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646 public static ZooKeeperWatcher createAndForceNodeToOpenedState(
3647 HBaseTestingUtility TEST_UTIL, Region region,
3648 ServerName serverName) throws ZooKeeperConnectionException,
3649 IOException, KeeperException, NodeExistsException {
3650 return createAndForceNodeToOpenedState(TEST_UTIL, (HRegion)region, serverName);
3651 }
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
  /**
   * Drive the region's zk assignment node through the full open sequence
   * (offline -&gt; opening -&gt; opened) as a region server would.
   * @return the watcher used; the caller is responsible for closing it
   */
  public static ZooKeeperWatcher createAndForceNodeToOpenedState(
      HBaseTestingUtility TEST_UTIL, HRegion region,
      ServerName serverName) throws ZooKeeperConnectionException,
      IOException, KeeperException, NodeExistsException {
    ZooKeeperWatcher zkw = getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.createNodeOffline(zkw, region.getRegionInfo(), serverName);
    int version = ZKAssign.transitionNodeOpening(zkw, region
        .getRegionInfo(), serverName);
    // The opened transition requires the version produced by the opening
    // transition, mirroring the real handshake.
    ZKAssign.transitionNodeOpened(zkw, region.getRegionInfo(), serverName,
        version);
    return zkw;
  }
3676
  /**
   * Assert that two cell lists are identical under KeyValue.COMPARATOR,
   * reporting the first differing position and both lengths on mismatch.
   * @param additionalMsg extra context appended to the failure message
   *   (may be null or empty)
   * @param expected expected cells
   * @param actual actual cells
   */
  public static void assertKVListsEqual(String additionalMsg,
      final List<? extends Cell> expected,
      final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    // Advance i to the first position where the two lists differ.
    int i;
    for (i = 0; i < minLen
        && KeyValue.COMPARATOR.compare(expected.get(i), actual.get(i)) == 0;
        ++i) {}

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    // Equal only when lengths match and no position differed.
    if (eLen != aLen || i != minLen) {
      throw new AssertionError(
          "Expected and actual KV arrays differ at position " + i + ": " +
          safeGetAsStr(expected, i) + " (length " + eLen +") vs. " +
          safeGetAsStr(actual, i) + " (length " + aLen + ")" + additionalMsg);
    }
  }
3703
3704 public static <T> String safeGetAsStr(List<T> lst, int i) {
3705 if (0 <= i && i < lst.size()) {
3706 return lst.get(i).toString();
3707 } else {
3708 return "<out_of_range>";
3709 }
3710 }
3711
3712 public String getClusterKey() {
3713 return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
3714 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
3715 + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
3716 HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
3717 }
3718
3719
  /**
   * Create a pre-split table and fill it with randomized data: numFlushes
   * rounds of numRowsPerFlush rows each, where every row gets a mix of
   * random puts and deletes across the given families, flushing after each
   * round when a mini cluster is running. The RNG is seeded from the table
   * name, so repeated runs with the same name produce the same data.
   *
   * @param tableName name of the table to create
   * @param families column family names
   * @param maxVersions max versions kept per column
   * @param numColsPerRow columns written/deleted per row
   * @param numFlushes rounds of writes (and flushes) to perform
   * @param numRegions number of pre-split regions
   * @param numRowsPerFlush rows touched per round
   * @return the created and populated table
   */
  public HTable createRandomTable(String tableName,
      final Collection<String> families,
      final int maxVersions,
      final int numColsPerRow,
      final int numFlushes,
      final int numRegions,
      final int numRowsPerFlush)
      throws IOException, InterruptedException {

    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions +
        " regions, " + numFlushes + " storefiles per region, " +
        numRowsPerFlush + " rows per flush, maxVersions=" +  maxVersions +
        "\n");

    // Deterministic per table name, so test data is reproducible.
    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
    final int numCF = families.size();
    final byte[][] cfBytes = new byte[numCF][];
    {
      int cfIndex = 0;
      for (String cf : families) {
        cfBytes[cfIndex++] = Bytes.toBytes(cf);
      }
    }

    // Split keys are formatted hex ints spread evenly across [0, MAX_VALUE);
    // the outermost regions keep the open-ended start/end keys.
    final int actualStartKey = 0;
    final int actualEndKey = Integer.MAX_VALUE;
    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
    final int splitStartKey = actualStartKey + keysPerRegion;
    final int splitEndKey = actualEndKey - keysPerRegion;
    final String keyFormat = "%08x";
    final HTable table = createTable(tableName, cfBytes,
        maxVersions,
        Bytes.toBytes(String.format(keyFormat, splitStartKey)),
        Bytes.toBytes(String.format(keyFormat, splitEndKey)),
        numRegions);

    if (hbaseCluster != null) {
      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
    }

    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            // ~50%: write a value encoding its own coordinates.
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            // ~40%: delete one version of the column.
            del.deleteColumn(cf, qual, ts);
          } else {
            // ~10%: delete all versions up to ts.
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
      if (hbaseCluster != null) {
        getMiniHBaseCluster().flushcache(table.getName());
      }
    }

    return table;
  }
3800
3801 private static final int MIN_RANDOM_PORT = 0xc000;
3802 private static final int MAX_RANDOM_PORT = 0xfffe;
3803 private static Random random = new Random();
3804
3805
3806
3807
3808
3809 public static int randomPort() {
3810 return MIN_RANDOM_PORT
3811 + random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
3812 }
3813
3814
3815
3816
3817
  /**
   * Returns a random port that appeared free at probe time, never handing
   * out the same port twice within this JVM (tracked in takenRandomPorts).
   * NOTE(review): bind-then-close probing is inherently racy (TOCTOU) —
   * another process can claim the port before the caller binds it.
   */
  public static int randomFreePort() {
    int port = 0;
    do {
      port = randomPort();
      // Skip ports this JVM already handed out, even if since released.
      if (takenRandomPorts.contains(port)) {
        continue;
      }
      takenRandomPorts.add(port);

      // Probe availability by binding and immediately closing.
      try {
        ServerSocket sock = new ServerSocket(port);
        sock.close();
      } catch (IOException ex) {
        port = 0; // port is in use; loop and pick another
      }
    } while (port == 0);
    return port;
  }
3836
3837
3838 public static String randomMultiCastAddress() {
3839 return "226.1.1." + random.nextInt(254);
3840 }
3841
3842
3843
3844 public static void waitForHostPort(String host, int port)
3845 throws IOException {
3846 final int maxTimeMs = 10000;
3847 final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3848 IOException savedException = null;
3849 LOG.info("Waiting for server at " + host + ":" + port);
3850 for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3851 try {
3852 Socket sock = new Socket(InetAddress.getByName(host), port);
3853 sock.close();
3854 savedException = null;
3855 LOG.info("Server at " + host + ":" + port + " is available");
3856 break;
3857 } catch (UnknownHostException e) {
3858 throw new IOException("Failed to look up " + host, e);
3859 } catch (IOException e) {
3860 savedException = e;
3861 }
3862 Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3863 }
3864
3865 if (savedException != null) {
3866 throw savedException;
3867 }
3868 }
3869
3870
3871
3872
3873
3874
  /**
   * Creates a pre-split load test table with a single column family, using
   * {@code DEFAULT_REGIONS_PER_SERVER} regions per server, region replication
   * of 1 and default durability.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      TableName tableName, byte[] columnFamily, Algorithm compression,
      DataBlockEncoding dataBlockEncoding) throws IOException {
    return createPreSplitLoadTestTable(conf, tableName,
      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
      Durability.USE_DEFAULT);
  }
3882
3883
3884
3885
3886
3887 public static int createPreSplitLoadTestTable(Configuration conf,
3888 TableName tableName, byte[] columnFamily, Algorithm compression,
3889 DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3890 Durability durability)
3891 throws IOException {
3892 HTableDescriptor desc = new HTableDescriptor(tableName);
3893 desc.setDurability(durability);
3894 desc.setRegionReplication(regionReplication);
3895 HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3896 hcd.setDataBlockEncoding(dataBlockEncoding);
3897 hcd.setCompressionType(compression);
3898 return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3899 }
3900
3901
3902
3903
3904
3905
3906 public static int createPreSplitLoadTestTable(Configuration conf,
3907 TableName tableName, byte[][] columnFamilies, Algorithm compression,
3908 DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
3909 Durability durability)
3910 throws IOException {
3911 HTableDescriptor desc = new HTableDescriptor(tableName);
3912 desc.setDurability(durability);
3913 desc.setRegionReplication(regionReplication);
3914 HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
3915 for (int i = 0; i < columnFamilies.length; i++) {
3916 HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
3917 hcd.setDataBlockEncoding(dataBlockEncoding);
3918 hcd.setCompressionType(compression);
3919 hcds[i] = hcd;
3920 }
3921 return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
3922 }
3923
3924
3925
3926
3927
3928
  /**
   * Creates a pre-split load test table from the given table and family
   * descriptors, using {@code DEFAULT_REGIONS_PER_SERVER} regions per server.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
  }
3933
3934
3935
3936
3937
3938
  /**
   * Creates a pre-split load test table with a single column family descriptor
   * by delegating to the array-of-families overload.
   *
   * @return the number of regions the table was split into
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
    return createPreSplitLoadTestTable(conf, desc, new HColumnDescriptor[] {hcd},
        numRegionsPerServer);
  }
3944
3945
3946
3947
3948
3949
3950 public static int createPreSplitLoadTestTable(Configuration conf,
3951 HTableDescriptor desc, HColumnDescriptor[] hcds, int numRegionsPerServer) throws IOException {
3952 for (HColumnDescriptor hcd : hcds) {
3953 if (!desc.hasFamily(hcd.getName())) {
3954 desc.addFamily(hcd);
3955 }
3956 }
3957
3958 int totalNumberOfRegions = 0;
3959 Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3960 Admin admin = unmanagedConnection.getAdmin();
3961
3962 try {
3963
3964
3965
3966 int numberOfServers = admin.getClusterStatus().getServers().size();
3967 if (numberOfServers == 0) {
3968 throw new IllegalStateException("No live regionservers");
3969 }
3970
3971 totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3972 LOG.info("Number of live regionservers: " + numberOfServers + ", " +
3973 "pre-splitting table into " + totalNumberOfRegions + " regions " +
3974 "(regions per server: " + numRegionsPerServer + ")");
3975
3976 byte[][] splits = new RegionSplitter.HexStringSplit().split(
3977 totalNumberOfRegions);
3978
3979 admin.createTable(desc, splits);
3980 } catch (MasterNotRunningException e) {
3981 LOG.error("Master not running", e);
3982 throw new IOException(e);
3983 } catch (TableExistsException e) {
3984 LOG.warn("Table " + desc.getTableName() +
3985 " already exists, continuing");
3986 } finally {
3987 admin.close();
3988 unmanagedConnection.close();
3989 }
3990 return totalNumberOfRegions;
3991 }
3992
3993 public static int getMetaRSPort(Configuration conf) throws IOException {
3994 try (Connection c = ConnectionFactory.createConnection();
3995 RegionLocator locator = c.getRegionLocator(TableName.META_TABLE_NAME)) {
3996 return locator.getRegionLocation(Bytes.toBytes("")).getPort();
3997 }
3998 }
3999
4000
4001
4002
4003
4004
4005
4006 public void assertRegionOnServer(
4007 final HRegionInfo hri, final ServerName server,
4008 final long timeout) throws IOException, InterruptedException {
4009 long timeoutTime = System.currentTimeMillis() + timeout;
4010 while (true) {
4011 List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
4012 if (regions.contains(hri)) return;
4013 long now = System.currentTimeMillis();
4014 if (now > timeoutTime) break;
4015 Thread.sleep(10);
4016 }
4017 fail("Could not find region " + hri.getRegionNameAsString()
4018 + " on server " + server);
4019 }
4020
4021
4022
4023
4024
  /**
   * Asserts that {@code hri} is deployed on {@code server} AND on no other
   * live regionserver (i.e. not double assigned), within {@code timeout} ms;
   * fails the test otherwise.
   */
  public void assertRegionOnlyOnServer(
      final HRegionInfo hri, final ServerName server,
      final long timeout) throws IOException, InterruptedException {
    long timeoutTime = System.currentTimeMillis() + timeout;
    while (true) {
      List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
      if (regions.contains(hri)) {
        // The expected server hosts the region; now scan every OTHER live
        // regionserver to verify none of them also hosts it.
        List<JVMClusterUtil.RegionServerThread> rsThreads =
          getHBaseCluster().getLiveRegionServerThreads();
        for (JVMClusterUtil.RegionServerThread rsThread: rsThreads) {
          HRegionServer rs = rsThread.getRegionServer();
          if (server.equals(rs.getServerName())) {
            // Skip the server the region is expected on.
            continue;
          }
          Collection<Region> hrs = rs.getOnlineRegionsLocalContext();
          for (Region r: hrs) {
            // Compare by region id rather than object equality.
            assertTrue("Region should not be double assigned",
              r.getRegionInfo().getRegionId() != hri.getRegionId());
          }
        }
        // Found on the expected server and nowhere else: success.
        return;
      }
      long now = System.currentTimeMillis();
      if (now > timeoutTime) break;
      Thread.sleep(10);
    }
    fail("Could not find region " + hri.getRegionNameAsString()
      + " on server " + server);
  }
4054
4055 public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
4056 throws IOException {
4057 HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
4058 htd.addFamily(hcd);
4059 HRegionInfo info =
4060 new HRegionInfo(TableName.valueOf(tableName), null, null, false);
4061 HRegion region =
4062 HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
4063 return region;
4064 }
4065
  // Sets the FS_URI field; presumably consumed when the mini cluster's
  // filesystem is (re)started — verify against the FS_URI readers elsewhere
  // in this class.
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
4069
4070
4071
4072
  /**
   * Waits up to {@code timeout} ms for {@code predicate} to become true.
   * Delegates to {@link Waiter#waitFor} with this utility's configuration.
   *
   * @return the value returned by {@link Waiter#waitFor}
   */
  public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, predicate);
  }
4077
4078
4079
4080
  /**
   * Waits up to {@code timeout} ms, polling every {@code interval} ms, for
   * {@code predicate} to become true. Delegates to {@link Waiter#waitFor}.
   *
   * @return the value returned by {@link Waiter#waitFor}
   */
  public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
      throws E {
    return Waiter.waitFor(this.conf, timeout, interval, predicate);
  }
4085
4086
4087
4088
  /**
   * Waits up to {@code timeout} ms, polling every {@code interval} ms, for
   * {@code predicate} to become true; {@code failIfTimeout} controls whether
   * expiry triggers a failure. Delegates to {@link Waiter#waitFor}.
   *
   * @return the value returned by {@link Waiter#waitFor}
   */
  public <E extends Exception> long waitFor(long timeout, long interval,
      boolean failIfTimeout, Predicate<E> predicate) throws E {
    return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
  }
4093
4094
4095
4096
4097 public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
4098 return new ExplainingPredicate<IOException>() {
4099 @Override
4100 public String explainFailure() throws IOException {
4101 final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4102 .getAssignmentManager().getRegionStates();
4103 return "found in transition: " + regionStates.getRegionsInTransition().toString();
4104 }
4105
4106 @Override
4107 public boolean evaluate() throws IOException {
4108 final RegionStates regionStates = getMiniHBaseCluster().getMaster()
4109 .getAssignmentManager().getRegionStates();
4110 return !regionStates.isRegionsInTransition();
4111 }
4112 };
4113 }
4114
4115
4116
4117
4118 public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
4119 return new ExplainingPredicate<IOException>() {
4120 @Override
4121 public String explainFailure() throws IOException {
4122 return explainTableState(tableName);
4123 }
4124
4125 @Override
4126 public boolean evaluate() throws IOException {
4127 return getHBaseAdmin().tableExists(tableName) && getHBaseAdmin().isTableEnabled(tableName);
4128 }
4129 };
4130 }
4131
4132
4133
4134
4135 public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
4136 return new ExplainingPredicate<IOException>() {
4137 @Override
4138 public String explainFailure() throws IOException {
4139 return explainTableState(tableName);
4140 }
4141
4142 @Override
4143 public boolean evaluate() throws IOException {
4144 return getHBaseAdmin().isTableDisabled(tableName);
4145 }
4146 };
4147 }
4148
4149
4150
4151
4152 public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
4153 return new ExplainingPredicate<IOException>() {
4154 @Override
4155 public String explainFailure() throws IOException {
4156 return explainTableAvailability(tableName);
4157 }
4158
4159 @Override
4160 public boolean evaluate() throws IOException {
4161 boolean tableAvailable = getHBaseAdmin().isTableAvailable(tableName);
4162 if (tableAvailable) {
4163 try {
4164 Canary.sniff(getHBaseAdmin(), tableName);
4165 } catch (Exception e) {
4166 throw new IOException("Canary sniff failed for table " + tableName, e);
4167 }
4168 }
4169 return tableAvailable;
4170 }
4171 };
4172 }
4173
4174
4175
4176
4177
4178
  /**
   * Waits up to {@code timeout} ms until no regions are in transition.
   * Delegates to {@link #waitFor(long, Predicate)} with
   * {@link #predicateNoRegionsInTransition()}.
   */
  public void waitUntilNoRegionsInTransition(
      final long timeout) throws Exception {
    waitFor(timeout, predicateNoRegionsInTransition());
  }
4183
4184
4185
4186
4187
4188
4189 public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4190 final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4191 waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4192
4193 @Override
4194 public boolean evaluate() {
4195 for (String label : labels) {
4196 if (labelsCache.getLabelOrdinal(label) == 0) {
4197 return false;
4198 }
4199 }
4200 return true;
4201 }
4202
4203 @Override
4204 public String explainFailure() {
4205 for (String label : labels) {
4206 if (labelsCache.getLabelOrdinal(label) == 0) {
4207 return label + " is not available yet";
4208 }
4209 }
4210 return "";
4211 }
4212 });
4213 }
4214
4215
4216
4217
4218
4219
  /**
   * Creates column family descriptors for every combination of supported
   * compression, data block encoding and bloom type, with an empty name
   * prefix. Delegates to {@link #generateColumnDescriptors(String)}.
   */
  public static List<HColumnDescriptor> generateColumnDescriptors() {
    return generateColumnDescriptors("");
  }
4223
4224
4225
4226
4227
4228
4229
4230 public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4231 List<HColumnDescriptor> htds = new ArrayList<HColumnDescriptor>();
4232 long familyId = 0;
4233 for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
4234 for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
4235 for (BloomType bloomType: BloomType.values()) {
4236 String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4237 HColumnDescriptor htd = new HColumnDescriptor(name);
4238 htd.setCompressionType(compressionType);
4239 htd.setDataBlockEncoding(encodingType);
4240 htd.setBloomFilterType(bloomType);
4241 htds.add(htd);
4242 familyId++;
4243 }
4244 }
4245 }
4246 return htds;
4247 }
4248
4249
4250
4251
4252
4253 public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
4254 String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
4255 List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>();
4256 for (String algoName : allAlgos) {
4257 try {
4258 Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
4259 algo.getCompressor();
4260 supportedAlgos.add(algo);
4261 } catch (Throwable t) {
4262
4263 }
4264 }
4265 return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
4266 }
4267
4268
4269
4270
4271
4272
4273 public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4274 Properties conf = MiniKdc.createConf();
4275 conf.put(MiniKdc.DEBUG, true);
4276 MiniKdc kdc = null;
4277 File dir = null;
4278
4279
4280 boolean bindException;
4281 int numTries = 0;
4282 do {
4283 try {
4284 bindException = false;
4285 dir = new File(getDataTestDir("kdc").toUri().getPath());
4286 kdc = new MiniKdc(conf, dir);
4287 kdc.start();
4288 } catch (BindException e) {
4289 FileUtils.deleteDirectory(dir);
4290 numTries++;
4291 if (numTries == 3) {
4292 LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4293 throw e;
4294 }
4295 LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4296 bindException = true;
4297 }
4298 } while (bindException);
4299 HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4300 return kdc;
4301 }
4302 }