
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test {@link FSUtils}.
 */
@Category(MediumTests.class)
public class TestFSUtils {
  /**
   * Test path compare and prefix checking.
   * @throws IOException
   */
  @Test
  public void testMatchingTail() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    Path rootdir = htu.getDataTestDir();
    assertTrue(rootdir.depth() > 1);
    Path partPath = new Path("a", "b");
    Path fullPath = new Path(rootdir, partPath);
    Path fullyQualifiedPath = fs.makeQualified(fullPath);
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath));
    assertFalse(FSUtils.isMatchingTail(fullPath, partPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullPath.toString()));
    assertTrue(FSUtils.isStartingWithPath(fullyQualifiedPath, fullPath.toString()));
    assertFalse(FSUtils.isStartingWithPath(rootdir, partPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullyQualifiedPath, partPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fullPath.toString()));
    assertTrue(FSUtils.isMatchingTail(fullyQualifiedPath, fs.makeQualified(fullPath)));
    assertTrue(FSUtils.isStartingWithPath(rootdir, fullyQualifiedPath.toString()));
    assertFalse(FSUtils.isMatchingTail(fullPath, new Path("x")));
    assertFalse(FSUtils.isMatchingTail(new Path("x"), fullPath));
  }

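  /**
   * Test reading and writing the cluster version file, including conversion to the pb format.
   */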
  @Test
  public void testVersion() throws DeserializationException, IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final FileSystem fs = htu.getTestFileSystem();
    final Path rootdir = htu.getDataTestDir();
    assertNull(FSUtils.getVersion(fs, rootdir));
    // Write out old format version file.  See if we can read it in and convert.
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FSDataOutputStream s = fs.create(versionFile);
    final String version = HConstants.FILE_SYSTEM_VERSION;
    s.writeUTF(version);
    s.close();
    assertTrue(fs.exists(versionFile));
    FileStatus[] status = fs.listStatus(versionFile);
    assertNotNull(status);
    assertTrue(status.length > 0);
    String newVersion = FSUtils.getVersion(fs, rootdir);
    assertEquals(version.length(), newVersion.length());
    assertEquals(version, newVersion);
    // File will have been converted. Exercise the pb format.
    assertEquals(version, FSUtils.getVersion(fs, rootdir));
    FSUtils.checkVersion(fs, rootdir, true);
  }

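  /**
   * Test that isHDFS only reports true when backed by a MiniDFSCluster, and that append support
   * is detected on it.
   */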
  @Test
  public void testIsHDFS() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setBoolean("dfs.support.append", false);
    assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
    htu.getConfiguration().setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;
    try {
      cluster = htu.startMiniDFSCluster(1);
      assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
      assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }

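  /**
   * Write {@code dataSize} bytes of zeroed data to {@code file} on the given file system.
   */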
  private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize)
    throws Exception {
    FSDataOutputStream out = fs.create(file);
    byte[] data = new byte[dataSize];
    out.write(data, 0, dataSize);
    out.close();
  }

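  /**
   * Test computeHDFSBlocksDistribution on files with two blocks, three blocks, and one block,
   * checking the locality weights against the expected block replica placement.
   */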
  @Test
  public void testComputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // set up a cluster with 3 nodes
      String[] hosts = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with two blocks
      testFile = new Path("/test1.txt");
      WriteDataToHDFS(fs, testFile, 2 * DEFAULT_BLOCK_SIZE);

      // The default replication factor is 3, the same as the number of datanodes, so the
      // locality index for each host should be 100%; that is, getWeight for each host should
      // equal getUniqueBlocksTotalWeight.
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with three blocks
      testFile = new Path("/test2.txt");
      WriteDataToHDFS(fs, testFile, 3 * DEFAULT_BLOCK_SIZE);

      // With the default replication factor of 3 there are 9 block replicas in total, so the
      // host with the highest weight should have weight == 3 * DEFAULT_BLOCK_SIZE.
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertTrue(uniqueBlocksTotalWeight == weight);

    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // set up a cluster with 4 nodes
      String[] hosts = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // create a file with one block
      testFile = new Path("/test3.txt");
      WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // With the default replication factor of 3 there are 3 block replicas in total, so one
      // of the 4 hosts carries no weight.
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
      } while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }

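  /**
   * Test that getFilePermissions honors 'hbase.data.umask.enable' and 'hbase.data.umask', and
   * that files created through FSUtils.create carry the requested permissions.
   */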
  @Test
  public void testPermMask() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // default fs permission
    FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    // 'hbase.data.umask.enable' is false, so we get the default fs permission.
    assertEquals(FsPermission.getFileDefault(), defaultFsPerm);

    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    // first check that we don't crash if we don't have perms set
    FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    // The default 'hbase.data.umask' is 000, and this umask is used when
    // 'hbase.data.umask.enable' is true.
    // Therefore we will not get the real fs default in this case.
    // Instead we will get the starting point FULL_RWX_PERMISSIONS.
    assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

    conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
    // now check that we get the right perms
    FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    assertEquals(new FsPermission("700"), filePerm);

    // then check that the correct file is created
    Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, filePerm, null);
      out.close();
      FileStatus stat = fs.getFileStatus(p);
      assertEquals(new FsPermission("700"), stat.getPermission());
      // and then cleanup
    } finally {
      fs.delete(p, true);
    }
  }

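  /**
   * Test FSUtils.isExists and FSUtils.delete, with recursive delete both disabled and enabled.
   */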
  @Test
  public void testDeleteAndExists() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    FileSystem fs = FileSystem.get(conf);
    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
    // create a test file
    String file = UUID.randomUUID().toString();
    Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
    Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
    try {
      FSDataOutputStream out = FSUtils.create(fs, p, perms, null);
      out.close();
      assertTrue("The created file should be present", FSUtils.isExists(fs, p));
      // delete the file with recursion set to false; only the file itself is deleted
      FSUtils.delete(fs, p, false);
      // create another file
      FSDataOutputStream out1 = FSUtils.create(fs, p1, perms, null);
      out1.close();
      // delete the file with recursion set to true; since p1 is a file, only the file is deleted
      FSUtils.delete(fs, p1, true);
      assertFalse("The created file should not be present", FSUtils.isExists(fs, p1));
      // and then cleanup
    } finally {
      FSUtils.delete(fs, p, true);
      FSUtils.delete(fs, p1, true);
    }
  }

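  /**
   * Test that renameAndSetModifyTime moves the file and stamps the destination with the injected
   * environment-edge time.
   */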
  @Test
  public void testRenameAndSetModifyTime() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();

    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    assertTrue(FSUtils.isHDFS(conf));

    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");

    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);

    FSDataOutputStream out = fs.create(p);
    out.close();
    assertTrue("The created file should be present", FSUtils.isExists(fs, p));

    long expect = System.currentTimeMillis() + 1000;
    assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());

    ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
    mockEnv.setValue(expect);
    EnvironmentEdgeManager.injectEdge(mockEnv);
    try {
      String dstFile = UUID.randomUUID().toString();
      Path dst = new Path(testDir, dstFile);

      assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
      assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
      assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));

      assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
      cluster.shutdown();
    } finally {
      EnvironmentEdgeManager.reset();
    }
  }

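  /**
   * Set the given WAL storage policy on a test directory, write a file into it, and verify the
   * file can be cleaned up; the policy only needs to be applied without throwing.
   */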
  private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.set(HConstants.WAL_STORAGE_POLICY, policy);

    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    try {
      assertTrue(FSUtils.isHDFS(conf));

      FileSystem fs = FileSystem.get(conf);
      Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
      fs.mkdirs(testDir);

      FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
          HConstants.DEFAULT_WAL_STORAGE_POLICY);

      String file = UUID.randomUUID().toString();
      Path p = new Path(testDir, file);
      WriteDataToHDFS(fs, p, 4096);
      // cleanupFile asserts existence before deleting
      cleanupFile(fs, testDir);
    } finally {
      cluster.shutdown();
    }
  }

  @Test
  public void testSetStoragePolicyDefault() throws Exception {
    verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
  }

  /* Might log a warning, but still works. (Always warns on Hadoop < 2.6.0.) */
  @Test
  public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
    verifyFileInDirWithStoragePolicy("ALL_SSD");
  }

  /* Should log a warning, but still works. (Different warning on Hadoop < 2.6.0.) */
  @Test
  public void testSetStoragePolicyInvalid() throws Exception {
    verifyFileInDirWithStoragePolicy("1772");
  }

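  /**
   * Test that setWALRootDir stores the WAL root in the configuration.
   */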
  @Test
  public void testSetWALRootDir() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    Path p = new Path("file:///hbase/root");
    FSUtils.setWALRootDir(conf, p);
    assertEquals(p.toString(), conf.get(HFileSystem.HBASE_WAL_DIR));
  }

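  /**
   * Test that getWALRootDir falls back to the root dir until a separate WAL root dir is set.
   */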
  @Test
  public void testGetWALRootDir() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    Path root = new Path("file:///hbase/root");
    Path walRoot = new Path("file:///hbase/logroot");
    FSUtils.setRootDir(conf, root);
    assertEquals(root, FSUtils.getRootDir(conf));
    assertEquals(root, FSUtils.getWALRootDir(conf));
    FSUtils.setWALRootDir(conf, walRoot);
    assertEquals(walRoot, FSUtils.getWALRootDir(conf));
  }

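  /**
   * Test that getWALRootDir rejects a WAL dir nested under the HBase root dir.
   */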
  @Test(expected = IllegalStateException.class)
  public void testGetWALRootDirIllegalWALDir() throws IOException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    Path root = new Path("file:///hbase/root");
    Path invalidWALDir = new Path("file:///hbase/root/logroot");
    FSUtils.setRootDir(conf, root);
    FSUtils.setWALRootDir(conf, invalidWALDir);
    FSUtils.getWALRootDir(conf);
  }

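  /**
   * Test that removeWALRootPath strips the WAL root prefix and leaves unrelated paths untouched.
   */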
  @Test
  public void testRemoveWALRootPath() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    FSUtils.setRootDir(conf, new Path("file:///user/hbase"));
    Path testFile = new Path(FSUtils.getRootDir(conf), "test/testfile");
    Path tmpFile = new Path("file:///test/testfile");
    assertEquals("test/testfile", FSUtils.removeWALRootPath(testFile, conf));
    assertEquals(tmpFile.toString(), FSUtils.removeWALRootPath(tmpFile, conf));
    FSUtils.setWALRootDir(conf, new Path("file:///user/hbaseLogDir"));
    assertEquals(testFile.toString(), FSUtils.removeWALRootPath(testFile, conf));
    Path logFile = new Path(FSUtils.getWALRootDir(conf), "test/testlog");
    assertEquals("test/testlog", FSUtils.removeWALRootPath(logFile, conf));
  }

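  /**
   * Delete the given path, asserting it exists beforehand and is gone afterwards.
   */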
  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
    assertTrue(fileSys.exists(name));
    assertTrue(fileSys.delete(name, true));
    assertFalse(fileSys.exists(name));
  }
}
438 }