/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.util;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.impl.BackupException;
import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
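
/**
 * A collection of utility methods used on the server side of the HBase backup
 * implementation: waiting for snapshots to finish, copying table and region metadata,
 * listing WAL files, and cleaning up data written by a backup.
 */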
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class BackupServerUtil {
  protected static final Log LOG = LogFactory.getLog(BackupServerUtil.class);
  public static final String LOGNAME_SEPARATOR = ".";

  private BackupServerUtil() {
    throw new AssertionError("Instantiating utility class...");
  }
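
  /**
   * Waits for the given snapshot to complete by polling the snapshot manager, sleeping an
   * increasing amount of time between checks, until the snapshot is done or the timeout expires.
   * @param snapshot snapshot to wait for
   * @param max maximum time to wait, in milliseconds
   * @param snapshotMgr snapshot manager used to query the snapshot state
   * @param conf configuration providing the client pause and retry settings
   * @throws IOException if the wait is interrupted or the snapshot does not finish in time
   */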
  public static void waitForSnapshot(SnapshotDescription snapshot, long max,
      SnapshotManager snapshotMgr, Configuration conf) throws IOException {
    boolean done = false;
    long start = EnvironmentEdgeManager.currentTime();
    int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    long maxPauseTime = max / numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
        ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max " +
        maxPauseTime + " ms per retry)");
    while (tries == 0
        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
      try {
        long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
          HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
        long sleep = HBaseAdmin.getPauseTime(tries++, pause);
        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
            "ms while waiting for snapshot completion.");
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
      }
      LOG.debug("Getting current status of snapshot ...");
      done = snapshotMgr.isSnapshotDone(snapshot);
    }
    if (!done) {
      throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
          + "' wasn't completed in expected time: " + max + " ms", snapshot);
    }
  }
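
  /**
   * Reduces a per-table map of region server log timestamps to a single minimum timestamp for
   * each region server across all tables.
   * @param rsLogTimestampMap map of table name to (region server name, timestamp) pairs
   * @return map of region server name to the smallest timestamp seen for that server, or
   *         null if the input is null or empty
   */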
  public static HashMap<String, Long> getRSLogTimestampMins(
      HashMap<TableName, HashMap<String, Long>> rsLogTimestampMap) {

    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
      return null;
    }

    HashMap<String, Long> rsLogTimestampMins = new HashMap<String, Long>();
    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS =
        new HashMap<String, HashMap<TableName, Long>>();

    // Invert the map: group the per-table timestamps by region server.
    for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
      TableName table = tableEntry.getKey();
      HashMap<String, Long> rsLogTimestamp = tableEntry.getValue();
      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
        String rs = rsEntry.getKey();
        Long ts = rsEntry.getValue();
        if (!rsLogTimestampMapByRS.containsKey(rs)) {
          rsLogTimestampMapByRS.put(rs, new HashMap<TableName, Long>());
        }
        rsLogTimestampMapByRS.get(rs).put(table, ts);
      }
    }

    // For every region server keep the smallest timestamp over all tables.
    for (String rs : rsLogTimestampMapByRS.keySet()) {
      rsLogTimestampMins.put(rs, BackupClientUtil.getMinValue(rsLogTimestampMapByRS.get(rs)));
    }

    return rsLogTimestampMins;
  }
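
  /**
   * Copies the table descriptor and region info files for every table in the backup into the
   * table's target backup directory, skipping tables that no longer exist.
   * @param backupContext backup context describing the tables and their target directories
   * @param conf configuration used to access the source and target file systems
   * @throws IOException if the metadata cannot be read or written
   */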
  public static void copyTableRegionInfo(BackupInfo backupContext, Configuration conf)
      throws IOException, InterruptedException {

    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {

      for (TableName table : backupContext.getTables()) {

        if (!admin.tableExists(table)) {
          LOG.warn("Table " + table + " does not exist, skipping it.");
          continue;
        }
        LOG.debug("Attempting to copy table info for: " + table);
        HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);

        // Write a copy of the table descriptor into the target backup directory.
        Path target = new Path(backupContext.getBackupStatus(table).getTargetDir());
        FileSystem targetFs = target.getFileSystem(conf);
        FSTableDescriptors descriptors =
            new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
        descriptors.createTableDescriptorForTableDirectory(target, orig, false);
        LOG.debug("Finished copying tableinfo.");

        // Write a .regioninfo file for every region of the table.
        List<HRegionInfo> regions = admin.getTableRegions(table);
        LOG.debug("Starting to write region info for table " + table);
        for (HRegionInfo regionInfo : regions) {
          Path regionDir =
              HRegion.getRegionDir(new Path(backupContext.getBackupStatus(table).getTargetDir()),
                regionInfo);
          regionDir =
              new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName());
          writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
        }
        LOG.debug("Finished writing region info for table " + table);
      }
    } catch (IOException e) {
      throw new BackupException(e);
    }
  }
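
  /**
   * Writes the serialized HRegionInfo into a .regioninfo file under the given directory, using
   * the file permissions configured for HBase data files.
   * @param conf configuration used to look up the data file umask
   * @param fs file system to write to
   * @param regionInfoDir directory in which the .regioninfo file is created
   * @param regionInfo region descriptor to serialize
   * @throws IOException if the file cannot be created or written
   */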
  public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs,
      final Path regionInfoDir, HRegionInfo regionInfo) throws IOException {
    final byte[] content = regionInfo.toDelimitedByteArray();
    Path regionInfoFile = new Path(regionInfoDir, ".regioninfo");

    FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);

    FSDataOutputStream out = FSUtils.create(fs, regionInfoFile, perms, null);
    try {
      out.write(content);
    } finally {
      out.close();
    }
  }
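
  /**
   * Extracts the address of the region server that wrote the given WAL file. For archived logs
   * the server is parsed from the file name; otherwise it is derived from the WAL directory name.
   * @param p path of the WAL file
   * @return the address of the region server that owned the log, or null if it cannot be parsed
   */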
  public static String parseHostNameFromLogFile(Path p) throws IOException {
    try {
      if (isArchivedLogFile(p)) {
        return BackupClientUtil.parseHostFromOldLog(p);
      } else {
        ServerName sname = DefaultWALProvider.getServerNameFromWALDirectoryName(p);
        return sname.getHostname() + ":" + sname.getPort();
      }
    } catch (Exception e) {
      LOG.error(e);
      return null;
    }
  }
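
  /**
   * Returns true if the given path points to a WAL file under the old-logs (archive) directory.
   */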
  private static boolean isArchivedLogFile(Path p) {
    String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR;
    return p.toString().contains(oldLog);
  }
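
  /**
   * Returns the part of a WAL file path that identifies the file: its file name component.
   * @param walFileName full path of a WAL file as a string
   * @return the file name component of the path
   */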
  public static String getUniqueWALFileNamePart(String walFileName) throws IOException {
    return getUniqueWALFileNamePart(new Path(walFileName));
  }
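
  /**
   * Returns the part of a WAL file path that identifies the file: its file name component.
   * @param p path of a WAL file
   * @return the file name component of the path
   */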
  public static String getUniqueWALFileNamePart(Path p) throws IOException {
    return p.getName();
  }
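
  /**
   * Recursively computes the total length, in bytes, of all files under the given directory.
   * @param fs file system to scan
   * @param dir directory to start from
   * @return combined length of all files found, 0 if the directory is empty or missing
   * @throws IOException if listing the directory fails
   */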
  public static long getFilesLength(FileSystem fs, Path dir) throws IOException {
    long totalLength = 0;
    FileStatus[] files = FSUtils.listStatus(fs, dir);
    if (files != null) {
      for (FileStatus fileStatus : files) {
        if (fileStatus.isDirectory()) {
          totalLength += getFilesLength(fs, fileStatus.getPath());
        } else {
          totalLength += fileStatus.getLen();
        }
      }
    }
    return totalLength;
  }
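
  /**
   * Sorts a list of backup records by start timestamp in descending order (newest first).
   * @param historyList backup records to sort
   * @return a new list containing the records ordered by descending start time
   */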
  public static ArrayList<BackupInfo> sortHistoryListDesc(
      ArrayList<BackupInfo> historyList) {
    ArrayList<BackupInfo> list = new ArrayList<BackupInfo>();
    // Sort by start timestamp numerically rather than by its string form so ordering does not
    // depend on the number of digits in the timestamps.
    TreeMap<Long, BackupInfo> map = new TreeMap<Long, BackupInfo>();
    for (BackupInfo h : historyList) {
      map.put(h.getStartTs(), h);
    }
    Iterator<Long> i = map.descendingKeySet().iterator();
    while (i.hasNext()) {
      list.add(map.get(i.next()));
    }
    return list;
  }
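
  /**
   * Lists all WAL files, both active and archived, under the HBase root directory.
   * @param c configuration used to locate the root directory and file system
   * @return list of WAL file paths as strings
   * @throws IOException if the log directories cannot be listed
   */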
  public static List<String> getListOfWALFiles(Configuration c) throws IOException {
    return getListOfWALFiles(c, null);
  }
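
  /**
   * Lists the WAL files, both active and archived, that are accepted by the given filter.
   * @param c configuration used to locate the root directory and file system
   * @param filter filter applied to each candidate file (null for no filtering)
   * @return list of matching WAL file paths as strings
   * @throws IOException if the log directories cannot be listed
   */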
  public static List<String> getListOfWALFiles(Configuration c, PathFilter filter)
      throws IOException {
    Path rootDir = FSUtils.getRootDir(c);
    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    List<String> logFiles = new ArrayList<String>();

    FileSystem fs = FileSystem.get(c);
    logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter);
    logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter);
    return logFiles;
  }
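
  /**
   * Lists the WAL files whose creation time is not newer than the timestamp recorded for the
   * region server that wrote them.
   * @param c configuration used to locate the log directories
   * @param hostTimestampMap map of region server address to its recorded timestamp
   * @return list of WAL file paths older than or equal to their server's timestamp
   * @throws IOException if the log directories cannot be listed
   */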
  public static List<String> getWALFilesOlderThan(final Configuration c,
      final HashMap<String, Long> hostTimestampMap) throws IOException {
    Path rootDir = FSUtils.getRootDir(c);
    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
    Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    List<String> logFiles = new ArrayList<String>();

    PathFilter filter = new PathFilter() {
      @Override
      public boolean accept(Path p) {
        try {
          if (DefaultWALProvider.isMetaFile(p)) {
            return false;
          }
          String host = parseHostNameFromLogFile(p);
          if (host == null) {
            return false;
          }
          Long oldTimestamp = hostTimestampMap.get(host);
          if (oldTimestamp == null) {
            // No timestamp recorded for this host; do not treat its logs as old.
            return false;
          }
          Long currentLogTS = BackupClientUtil.getCreationTime(p);
          return currentLogTS <= oldTimestamp;
        } catch (Exception e) {
          LOG.error(e);
          return false;
        }
      }
    };
    FileSystem fs = FileSystem.get(c);
    logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter);
    logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter);
    return logFiles;
  }
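
  /**
   * Joins table names into a single string using the backup command table name delimiter.
   * @param names table names to join
   * @return delimited string of fully qualified table names
   */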
  public static String join(TableName[] names) {
    StringBuilder sb = new StringBuilder();
    String sep = BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND;
    for (TableName s : names) {
      if (sb.length() > 0) {
        sb.append(sep);
      }
      sb.append(s.getNameAsString());
    }
    return sb.toString();
  }
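
  /**
   * Splits a delimited string of table names, as used in backup commands, into TableName objects.
   * @param tables delimited table name string, may be null
   * @return array of table names, or null if the input string is null
   */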
  public static TableName[] parseTableNames(String tables) {
    if (tables == null) {
      return null;
    }
    String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);

    TableName[] ret = new TableName[tableArray.length];
    for (int i = 0; i < tableArray.length; i++) {
      ret[i] = TableName.valueOf(tableArray[i]);
    }
    return ret;
  }
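
  /**
   * Removes all data written for the given backup: the copied WAL files and the per-table data
   * under the backup target directory.
   * @param context backup to clean up
   * @param conf configuration used to access the target file system
   * @throws IOException if the copied WAL files cannot be removed
   */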
  public static void cleanupBackupData(BackupInfo context, Configuration conf)
      throws IOException {
    cleanupHLogDir(context, conf);
    cleanupTargetDir(context, conf);
  }
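
  /**
   * Removes the WAL files that were copied for this backup from the backup's log directory.
   * @param backupContext backup whose copied log files should be removed
   * @param conf configuration used to access the file system
   * @throws IOException if the log directory cannot be accessed
   */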
  private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf)
      throws IOException {

    String logDir = backupContext.getHLogTargetDir();
    if (logDir == null) {
      LOG.warn("No log directory specified for " + backupContext.getBackupId());
      return;
    }

    Path rootPath = new Path(logDir).getParent();
    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
    if (files == null) {
      return;
    }
    for (FileStatus file : files) {
      LOG.debug("Deleting log files: " + file.getPath().getName());
      if (!FSUtils.delete(fs, file.getPath(), true)) {
        LOG.warn("Could not delete files in " + file.getPath());
      }
    }
  }
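
  /**
   * Deletes the per-table backup data of the given backup under its target root directory and
   * removes table directories that become empty as a result. Failures are logged, not rethrown.
   * @param backupContext backup whose target data should be removed
   * @param conf configuration used to access the target file system
   */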
  private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) {
    try {
      LOG.debug("Trying to clean up the target dir for backup " + backupContext.getBackupId());
      String targetDir = backupContext.getTargetRootDir();
      if (targetDir == null) {
        LOG.warn("No target directory specified for " + backupContext.getBackupId());
        return;
      }

      FileSystem outputFs =
          FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);

      for (TableName table : backupContext.getTables()) {
        Path targetDirPath =
            new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
              backupContext.getBackupId(), table));
        if (outputFs.delete(targetDirPath, true)) {
          LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
        } else {
          LOG.info("No data has been found in " + targetDirPath.toString() + ".");
        }

        // If no other backups remain for this table, remove the now-empty table directory too.
        Path tableDir = targetDirPath.getParent();
        FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
        if (backups == null || backups.length == 0) {
          if (outputFs.delete(tableDir, true)) {
            LOG.debug(tableDir.toString() + " is empty, removing it.");
          } else {
            LOG.warn("Could not delete " + tableDir);
          }
        }
      }

    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at "
          + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
    }
  }
}