package org.apache.hadoop.hbase.io.encoding;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.TestMiniClusterLoadSequential;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runners.Parameterized.Parameters;
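/**
 * Runs the sequential multi-put load test with PREFIX data block encoding and
 * GZ compression, then modifies the column family, re-enables the table and
 * major-compacts it, verifying that every region stays online throughout.
 */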
@Category(MediumTests.class)
public class TestLoadAndSwitchEncodeOnDisk extends
    TestMiniClusterLoadSequential {

  private static final boolean USE_MULTI_PUT = true;

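  // A single parameter set with no arguments: the multi-put flag and the data
  // block encoding are fixed in the constructor below.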
  @Parameters
  public static Collection<Object[]> parameters() {
    return Arrays.asList(new Object[][]{ new Object[0] });
  }

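  // Fix the data block encoding to PREFIX and enable caching of blocks as
  // they are written.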
  public TestLoadAndSwitchEncodeOnDisk() {
    super(USE_MULTI_PUT, DataBlockEncoding.PREFIX);
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  }

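  // Override the number of keys loaded by the parent test.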
  @Override
  protected int numKeys() {
    return 3000;
  }

  @Override
  @Test(timeout=TIMEOUT_MS)
  public void loadTest() throws Exception {
    HBaseAdmin admin = new HBaseAdmin(conf);

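    // Switch to GZ compression and run the sequential load from the parent test.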
    compression = Compression.Algorithm.GZ;
    super.loadTest();

    HColumnDescriptor hcd = getColumnDesc(admin);
    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
    HTable t = new HTable(this.conf, TABLE);
    assertAllOnLine(t);

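    // Take the table offline, reapply the column descriptor, and bring the
    // table back online.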
    admin.disableTable(TABLE);
    admin.modifyColumn(TABLE, hcd);

    System.err.println("\nRe-enabling table\n");
    admin.enableTable(TABLE);

    System.err.println("\nNew column descriptor: " +
        getColumnDesc(admin) + "\n");

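    // Every region should still be online and serving data after the schema change.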
    assertAllOnLine(t);

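    // A major compaction rewrites all store files with the current column
    // family settings.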
    System.err.println("\nCompacting the table\n");
    admin.majorCompact(TABLE.getName());

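    // Give the compaction request time to be queued, then poll until the
    // region server's compaction queue drains.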
    Threads.sleepWithoutInterrupt(5000);
    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
      Threads.sleep(50);
    }

    t.close();
    admin.close();
    System.err.println("\nDone with the test, shutting down the cluster\n");
  }

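  /**
   * Starts a scan at the start key of every region of the table and asserts
   * that at least one non-empty row comes back, i.e. all regions are online
   * and serving reads.
   */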
  private void assertAllOnLine(final HTable t) throws IOException {
    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
    for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
      byte[] startkey = e.getKey().getStartKey();
      Scan s = new Scan(startkey);
      ResultScanner scanner = t.getScanner(s);
      Result r = scanner.next();
      org.junit.Assert.assertTrue(r != null && r.size() > 0);
      scanner.close();
    }
  }
}