
Commit 5427656

rubenvw-ngdata authored and bbeaudreault committed
HBASE-28412 Select correct target table for incremental backup restore (#5776)
Contributed-by: Ruben Van Wanzeele <[email protected]>
Signed-off-by: Bryan Beaudreault <[email protected]>
1 parent a53008b commit 5427656
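
For context, a minimal sketch of the restore call this commit fixes, based on the RestoreRequest builder exercised by the new test below; backupId, backupRootDir, and backupAdmin are assumed to come from an earlier backup and an open connection, and the table names are illustrative:

  // Restore a backup of "table" into "another-table". Before this fix, an
  // incremental restore could replay the data into the source table instead
  // of the requested target.
  RestoreRequest request = new RestoreRequest.Builder()
    .withBackupId(backupId)
    .withBackupRootDir(backupRootDir)
    .withOvewrite(true) // spelling as in the HBase builder API used in the test below
    .withFromTables(new TableName[] { TableName.valueOf("table") })
    .withToTables(new TableName[] { TableName.valueOf("another-table") })
    .build();
  backupAdmin.restore(request);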

File tree

4 files changed (+317, -27 lines)


hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java

Lines changed: 1 addition & 3 deletions
@@ -74,9 +74,7 @@ public void run(Path[] dirPaths, TableName[] tableNames, Path restoreRootDir,
         BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-      String[] playerArgs = { dirs,
-        fullBackupRestore ? newTableNames[i].getNameAsString() : tableNames[i].getNameAsString() };
-
+      String[] playerArgs = { dirs, newTableNames[i].getNameAsString() };
       int result;
       try {

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/BackupTestUtil.java

Lines changed: 55 additions & 0 deletions

@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class BackupTestUtil {
+  private BackupTestUtil() {
+  }
+
+  static BackupInfo verifyBackup(Configuration conf, String backupId, BackupType expectedType,
+    BackupInfo.BackupState expectedState) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
+      BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId);
+
+      // Verify managed backup in HBase
+      assertEquals(backupId, backupInfo.getBackupId());
+      assertEquals(expectedState, backupInfo.getState());
+      assertEquals(expectedType, backupInfo.getType());
+      return backupInfo;
+    }
+  }
+
+  static void enableBackup(Configuration conf) {
+    // Enable backup
+    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
+    BackupManager.decorateMasterConfiguration(conf);
+    BackupManager.decorateRegionServerConfiguration(conf);
+  }
+}
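
As a quick usage reference, a sketch of how a test consumes these helpers (mirroring the static imports in the tests below; conf is the configuration of a test cluster and backupId the id of a FULL backup taken earlier):

  Configuration conf = HBaseConfiguration.create();
  enableBackup(conf); // must run before the cluster starts, so master/RS pick up the backup config
  // ... start TestingHBaseCluster with conf, load data, take a FULL backup ...
  BackupInfo info = verifyBackup(conf, backupId, BackupType.FULL, BackupInfo.BackupState.COMPLETE);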

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreOnEmptyEnvironment.java

Lines changed: 257 additions & 0 deletions

@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup;
+import static org.apache.hadoop.hbase.backup.BackupType.FULL;
+import static org.apache.hadoop.hbase.backup.BackupType.INCREMENTAL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testing.TestingHBaseCluster;
+import org.apache.hadoop.hbase.testing.TestingHBaseClusterOption;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category(MediumTests.class)
+@RunWith(Parameterized.class)
+public class TestBackupRestoreOnEmptyEnvironment {
+
+  private static final Logger LOG =
+    LoggerFactory.getLogger(TestBackupRestoreOnEmptyEnvironment.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupRestoreOnEmptyEnvironment.class);
+
+  @Parameterized.Parameters(name = "{index}: restoreToOtherTable={0}")
+  public static Iterable<Object[]> data() {
+    return HBaseCommonTestingUtility.BOOLEAN_PARAMETERIZED;
+  }
+
+  @Parameterized.Parameter(0)
+  public boolean restoreToOtherTable;
+  private TableName sourceTable;
+  private TableName targetTable;
+
+  private static TestingHBaseCluster cluster;
+  private static Path BACKUP_ROOT_DIR;
+  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("0");
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    enableBackup(conf);
+    cluster = TestingHBaseCluster.create(TestingHBaseClusterOption.builder().conf(conf).build());
+    cluster.start();
+    BACKUP_ROOT_DIR = new Path(new Path(conf.get("fs.defaultFS")), new Path("/backupIT"));
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    cluster.stop();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    sourceTable = TableName.valueOf("table");
+    targetTable = TableName.valueOf("another-table");
+    createTable(sourceTable);
+    createTable(targetTable);
+  }
+
+  @After
+  public void removeTables() throws Exception {
+    deleteTables();
+  }
+
+  @Test
+  public void testRestoreToCorrectTable() throws Exception {
+    Instant timestamp = Instant.now().minusSeconds(10);
+
+    // load some data
+    putLoad(sourceTable, timestamp, "data");
+
+    String backupId = backup(FULL, Collections.singletonList(sourceTable));
+    BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
+    assertTrue(backupInfo.getTables().contains(sourceTable));
+
+    LOG.info("Deleting the tables before restore ...");
+    deleteTables();
+
+    if (restoreToOtherTable) {
+      restore(backupId, sourceTable, targetTable);
+      validateDataEquals(targetTable, "data");
+    } else {
+      restore(backupId, sourceTable, sourceTable);
+      validateDataEquals(sourceTable, "data");
+    }
+
+  }
+
+  @Test
+  public void testRestoreCorrectTableForIncremental() throws Exception {
+    Instant timestamp = Instant.now().minusSeconds(10);
+
+    // load some data
+    putLoad(sourceTable, timestamp, "data");
+
+    String backupId = backup(FULL, Collections.singletonList(sourceTable));
+    verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
+
+    // some incremental data
+    putLoad(sourceTable, timestamp.plusMillis(1), "new_data");
+
+    String backupId2 = backup(INCREMENTAL, Collections.singletonList(sourceTable));
+    verifyBackup(cluster.getConf(), backupId2, INCREMENTAL, COMPLETE);
+
+    LOG.info("Deleting the tables before restore ...");
+    deleteTables();
+
+    if (restoreToOtherTable) {
+      restore(backupId2, sourceTable, targetTable);
+      validateDataEquals(targetTable, "new_data");
+    } else {
+      restore(backupId2, sourceTable, sourceTable);
+      validateDataEquals(sourceTable, "new_data");
+    }
+
+  }
+
+  private void createTable(TableName tableName) throws IOException {
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(COLUMN_FAMILY));
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      Admin admin = connection.getAdmin()) {
+      admin.createTable(builder.build());
+    }
+  }
+
+  private void deleteTables() throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      Admin admin = connection.getAdmin()) {
+      for (TableName table : Arrays.asList(sourceTable, targetTable)) {
+        if (admin.tableExists(table)) {
+          admin.disableTable(table);
+          admin.deleteTable(table);
+        }
+      }
+    }
+  }
+
+  private void putLoad(TableName tableName, Instant timestamp, String data) throws IOException {
+    LOG.info("Writing new data to HBase using normal Puts: {}", data);
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf())) {
+      Table table = connection.getTable(sourceTable);
+      List<Put> puts = new ArrayList<>();
+      for (int i = 0; i < 10; i++) {
+        Put put = new Put(Bytes.toBytes(i), timestamp.toEpochMilli());
+        put.addColumn(COLUMN_FAMILY, Bytes.toBytes("data"), Bytes.toBytes(data));
+        puts.add(put);
+
+        if (i % 100 == 0) {
+          table.put(puts);
+          puts.clear();
+        }
+      }
+      if (!puts.isEmpty()) {
+        table.put(puts);
+      }
+      connection.getAdmin().flush(tableName);
+    }
+  }
+
+  private String backup(BackupType backupType, List<TableName> tables) throws IOException {
+    LOG.info("Creating the backup ...");
+
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
+      BackupRequest backupRequest =
+        new BackupRequest.Builder().withTargetRootDir(BACKUP_ROOT_DIR.toString())
+          .withTableList(new ArrayList<>(tables)).withBackupType(backupType).build();
+      return backupAdmin.backupTables(backupRequest);
+    }
+
+  }
+
+  private void restore(String backupId, TableName sourceTableName, TableName targetTableName)
+    throws IOException {
+    LOG.info("Restoring data ...");
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
+      RestoreRequest restoreRequest = new RestoreRequest.Builder().withBackupId(backupId)
+        .withBackupRootDir(BACKUP_ROOT_DIR.toString()).withOvewrite(true)
+        .withFromTables(new TableName[] { sourceTableName })
+        .withToTables(new TableName[] { targetTableName }).build();
+      backupAdmin.restore(restoreRequest);
+    }
+  }
+
+  private void validateDataEquals(TableName tableName, String expectedData) throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
+      Table table = connection.getTable(tableName)) {
+      Scan scan = new Scan();
+      scan.setRaw(true);
+      scan.setBatch(100);
+
+      for (Result sourceResult : table.getScanner(scan)) {
+        List<Cell> sourceCells = sourceResult.listCells();
+        for (Cell cell : sourceCells) {
+          assertEquals(expectedData, Bytes.toStringBinary(cell.getValueArray(),
+            cell.getValueOffset(), cell.getValueLength()));
+        }
+      }
+    }
+  }
+}
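
Both tests run twice via BOOLEAN_PARAMETERIZED, once restoring onto the source table and once onto another-table. The incremental case with restoreToOtherTable=true is the regression this commit fixes: before the MapReduceRestoreJob change above, the incrementally restored data would not land in the requested target table.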

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreWithModifications.java

Lines changed: 4 additions & 24 deletions
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.backup;
 
 import static org.apache.hadoop.hbase.backup.BackupInfo.BackupState.COMPLETE;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.enableBackup;
+import static org.apache.hadoop.hbase.backup.BackupTestUtil.verifyBackup;
 import static org.apache.hadoop.hbase.backup.BackupType.FULL;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -41,7 +43,6 @@
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -126,7 +127,7 @@ public void testModificationsOnTable() throws Exception {
     load(sourceTable, timestamp, "data");
 
     String backupId = backup(FULL, allTables);
-    BackupInfo backupInfo = verifyBackup(backupId, FULL, COMPLETE);
+    BackupInfo backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
     assertTrue(backupInfo.getTables().contains(sourceTable));
 
     restore(backupId, sourceTable, targetTable);
@@ -137,7 +138,7 @@ public void testModificationsOnTable() throws Exception {
     load(sourceTable, timestamp, "changed_data");
 
     backupId = backup(FULL, allTables);
-    backupInfo = verifyBackup(backupId, FULL, COMPLETE);
+    backupInfo = verifyBackup(cluster.getConf(), backupId, FULL, COMPLETE);
     assertTrue(backupInfo.getTables().contains(sourceTable));
 
     restore(backupId, sourceTable, targetTable);
@@ -252,25 +253,4 @@ private void validateDataEquals(TableName tableName, String expectedData) throws
     }
   }
 
-  private BackupInfo verifyBackup(String backupId, BackupType expectedType,
-    BackupInfo.BackupState expectedState) throws IOException {
-    try (Connection connection = ConnectionFactory.createConnection(cluster.getConf());
-      BackupAdmin backupAdmin = new BackupAdminImpl(connection)) {
-      BackupInfo backupInfo = backupAdmin.getBackupInfo(backupId);
-
-      // Verify managed backup in HBase
-      assertEquals(backupId, backupInfo.getBackupId());
-      assertEquals(expectedState, backupInfo.getState());
-      assertEquals(expectedType, backupInfo.getType());
-      return backupInfo;
-    }
-  }
-
-  private static void enableBackup(Configuration conf) {
-    // Enable backup
-    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
-    BackupManager.decorateMasterConfiguration(conf);
-    BackupManager.decorateRegionServerConfiguration(conf);
-  }
-
 }
