 package org.apache.hadoop.hbase.backup.impl;
 
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
-import static org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -42,26 +40,17 @@
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
 import org.apache.hadoop.hbase.mapreduce.WALPlayer;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-
 /**
  * Incremental backup implementation. See the {@link #execute() execute} method.
  */
@@ -287,48 +276,10 @@ public void execute() throws IOException {
 
     // case INCREMENTAL_COPY:
     try {
-      // todo: need to add an abstraction to encapsulate and DRY this up
-      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo);
-      Map<TableName, List<RegionInfo>> regionsByTable = new HashMap<>();
-      List<ImmutableBytesWritable> splits = new ArrayList<>();
-      for (TableName table : backupInfo.getTables()) {
-        ArrayList<BackupImage> ancestorsForTable =
-          BackupManager.filterAncestorsForTable(ancestors, table);
-
-        BackupImage backupImage = ancestorsForTable.get(ancestorsForTable.size() - 1);
-        if (backupImage.getType() != BackupType.FULL) {
-          throw new RuntimeException("No full backup found in ancestors for table " + table);
-        }
-
-        String lastFullBackupId = backupImage.getBackupId();
-        Path backupRootDir = new Path(backupInfo.getBackupRootDir());
-
-        FileSystem backupFs = backupRootDir.getFileSystem(conf);
-        Path tableInfoPath =
-          BackupUtils.getTableInfoPath(backupFs, backupRootDir, lastFullBackupId, table);
-        SnapshotProtos.SnapshotDescription snapshotDesc =
-          SnapshotDescriptionUtils.readSnapshotInfo(backupFs, tableInfoPath);
-        SnapshotManifest manifest =
-          SnapshotManifest.open(conf, backupFs, tableInfoPath, snapshotDesc);
-        List<RegionInfo> regionInfos = new ArrayList<>(manifest.getRegionManifests().size());
-        for (SnapshotProtos.SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
-          HBaseProtos.RegionInfo regionInfo = regionManifest.getRegionInfo();
-          RegionInfo regionInfoObj = ProtobufUtil.toRegionInfo(regionInfo);
-          // scanning meta doesnt return mob regions, so skip them here too so we keep parity
-          if (Bytes.equals(regionInfoObj.getStartKey(), MobConstants.MOB_REGION_NAME_BYTES)) {
-            continue;
-          }
-
-          regionInfos.add(regionInfoObj);
-          splits.add(new ImmutableBytesWritable(HFileOutputFormat2
-            .combineTableNameSuffix(table.getName(), regionInfoObj.getStartKey())));
-        }
-        regionsByTable.put(table, regionInfos);
-      }
       // copy out the table and region info files for each table
-      BackupUtils.copyTableRegionInfo(conn, backupInfo, regionsByTable, conf);
+      BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
       // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
-      convertWALsToHFiles(splits);
+      convertWALsToHFiles();
       incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
         backupInfo.getBackupRootDir());
     } catch (Exception e) {
@@ -408,7 +359,7 @@ protected void deleteBulkLoadDirectory() throws IOException {
     }
   }
 
-  protected void convertWALsToHFiles(List<ImmutableBytesWritable> splits) throws IOException {
+  protected void convertWALsToHFiles() throws IOException {
     // get incremental backup file list and prepare parameters for DistCp
     List<String> incrBackupFileList = backupInfo.getIncrBackupFileList();
     // Get list of tables in incremental backup set
@@ -424,7 +375,7 @@ protected void convertWALsToHFiles(List<ImmutableBytesWritable> splits) throws I
         LOG.warn("Table " + table + " does not exists. Skipping in WAL converter");
       }
     }
-    walToHFiles(incrBackupFileList, tableList, splits);
+    walToHFiles(incrBackupFileList, tableList);
 
   }
 
@@ -434,9 +385,8 @@ protected boolean tableExists(TableName table, Connection conn) throws IOExcepti
     }
   }
 
-  protected void walToHFiles(List<String> dirPaths, List<String> tableList,
-    List<ImmutableBytesWritable> splits) throws IOException {
-    WALPlayer player = new WALPlayer();
+  protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException {
+    Tool player = new WALPlayer();
 
     // Player reads all files in arbitrary directory structure and creates
     // a Map task for each file. We use ';' as separator
@@ -451,7 +401,6 @@ protected void walToHFiles(List<String> dirPaths, List<String> tableList,
     conf.set(JOB_NAME_CONF_KEY, jobname);
     String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };
 
-    player.setSplits(splits);
     try {
       player.setConf(conf);
       int result = player.run(playerArgs);