/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertTrue;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.BackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TestLoadIncrementalHFiles;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.hamcrest.CoreMatchers;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import com.google.common.collect.Lists;

/**
 * Test scenario:
 * 1. Create tables t1 and t2.
 * 2. Load data into t1 and t2.
 * 3. Take a full backup of t1 and t2.
 * 4. Delete t2.
 * 5. Load more data into t1.
 * 6. Take an incremental backup of t1.
 */
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackupDeleteTable extends TestBackupBase {
  private static final Log LOG = LogFactory.getLog(TestIncrementalBackupDeleteTable.class);

  @Parameterized.Parameters
  public static Collection<Object[]> data() {
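    // Single parameter set with the suite's 'secure' flag enabled; presumably
    // TestBackupBase uses this to run the test against a secure mini cluster.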
    secure = true;
    List<Object[]> params = new ArrayList<Object[]>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  public TestIncrementalBackupDeleteTable(Boolean b) {
  }

  @Test
  public void testIncBackupDeleteTable() throws Exception {
    String testName = "TestIncBackupDeleteTable";
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");

    List<TableName> tables = Lists.newArrayList(table1, table2);
    Connection conn = ConnectionFactory.createConnection(conf1);
    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();

    BackupRequest request = new BackupRequest();
    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
    String backupIdFull = admin.getBackupAdmin().backupTables(request);

    assertTrue(checkSucceeded(backupIdFull));

    // #2 - insert some data to table table1
    HTable t1 = (HTable) conn.getTable(table1);
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      Put p1 = new Put(Bytes.toBytes("row-t1" + i));
      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
      t1.put(p1);
    }
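
    // The expected count doubles because table1 already held NB_ROWS_IN_BATCH
    // rows, presumably loaded by the TestBackupBase setup.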
    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
    t1.close();

    // Delete table table2
    admin.disableTable(table2);
    admin.deleteTable(table2);
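
    // Bulk load NB_ROWS2 additional rows into table1 as HFiles spanning two key
    // ranges; 'actual' holds the number of rows the loader reports as added.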
    int NB_ROWS2 = 20;
    LOG.debug("bulk loading into " + testName);
    int actual = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName,
        qualName, false, null,
        new byte[][][] {
          new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
          new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") },
        }, true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2, false);

    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = new BackupRequest();
    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
        .setTargetRootDir(BACKUP_ROOT_DIR);
    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));

    BackupAdmin client = getBackupAdmin();
    /*
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, false));

    // #5.1 - check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    hAdmin.close();

    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
    hTable.close();

    hTable = (HTable) conn.getTable(table2_restore);
    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
    hTable.close();
    */
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
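
    // The restored table should hold both batches of puts plus the bulk-loaded
    // rows captured by the incremental backup.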
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertThat(TEST_UTIL.countRows(hTable),
        CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2 + actual));
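
    // Take one more full backup; this should clear the bulk-load records that
    // the backup system table tracked for these tables.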
    request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
    backupIdFull = client.backupTables(request);
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair =
          table.readOrigBulkloadRows(tables);
      assertTrue("map still has " + pair.getSecond().size() + " entries",
          pair.getSecond().isEmpty());
    }
    assertTrue(checkSucceeded(backupIdFull));

    hTable.close();
    admin.close();
    conn.close();
  }
}