 package org.apache.hadoop.hbase.backup.impl;

 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
+import static org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;

 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
 import org.apache.hadoop.hbase.mapreduce.WALPlayer;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.util.Tool;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+
 /**
  * Incremental backup implementation. See the {@link #execute() execute} method.
  */
@@ -276,10 +287,48 @@ public void execute() throws IOException {

     // case INCREMENTAL_COPY:
     try {
+      // todo: need to add an abstraction to encapsulate and DRY this up
+      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupInfo);
+      Map<TableName, List<RegionInfo>> regionsByTable = new HashMap<>();
+      List<ImmutableBytesWritable> splits = new ArrayList<>();
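+      // For each table, locate the most recent full backup among this backup's ancestors and
+      // read the region layout from its snapshot manifest; those regions feed the region info
+      // copied below and the split points handed to the WAL-to-HFile conversion.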
+      for (TableName table : backupInfo.getTables()) {
+        ArrayList<BackupImage> ancestorsForTable =
+          BackupManager.filterAncestorsForTable(ancestors, table);
+
+        BackupImage backupImage = ancestorsForTable.get(ancestorsForTable.size() - 1);
+        if (backupImage.getType() != BackupType.FULL) {
+          throw new RuntimeException("No full backup found in ancestors for table " + table);
+        }
+
+        String lastFullBackupId = backupImage.getBackupId();
+        Path backupRootDir = new Path(backupInfo.getBackupRootDir());
+
+        FileSystem backupFs = backupRootDir.getFileSystem(conf);
+        Path tableInfoPath =
+          BackupUtils.getTableInfoPath(backupFs, backupRootDir, lastFullBackupId, table);
+        SnapshotProtos.SnapshotDescription snapshotDesc =
+          SnapshotDescriptionUtils.readSnapshotInfo(backupFs, tableInfoPath);
+        SnapshotManifest manifest =
+          SnapshotManifest.open(conf, backupFs, tableInfoPath, snapshotDesc);
+        List<RegionInfo> regionInfos = new ArrayList<>(manifest.getRegionManifests().size());
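+        // Each region becomes one split point: the table name combined with the region's start
+        // key via HFileOutputFormat2.combineTableNameSuffix, matching the table-prefixed keys
+        // used for multi-table HFile output.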
+        for (SnapshotProtos.SnapshotRegionManifest regionManifest : manifest.getRegionManifests()) {
+          HBaseProtos.RegionInfo regionInfo = regionManifest.getRegionInfo();
+          RegionInfo regionInfoObj = ProtobufUtil.toRegionInfo(regionInfo);
+          // scanning meta doesn't return mob regions, so skip them here too to keep parity
+          if (Bytes.equals(regionInfoObj.getStartKey(), MobConstants.MOB_REGION_NAME_BYTES)) {
+            continue;
+          }
+
+          regionInfos.add(regionInfoObj);
+          splits.add(new ImmutableBytesWritable(HFileOutputFormat2
+            .combineTableNameSuffix(table.getName(), regionInfoObj.getStartKey())));
+        }
+        regionsByTable.put(table, regionInfos);
+      }
       // copy out the table and region info files for each table
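+      // regionsByTable lets copyTableRegionInfo record the regions captured in the last full
+      // backup rather than whatever hbase:meta currently holds (assumed intent of the new
+      // parameter; the updated copyTableRegionInfo is not part of this hunk).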
-      BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
+      BackupUtils.copyTableRegionInfo(conn, backupInfo, regionsByTable, conf);
       // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
-      convertWALsToHFiles();
+      convertWALsToHFiles(splits);
       incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
         backupInfo.getBackupRootDir());
     } catch (Exception e) {
@@ -359,7 +408,7 @@ protected void deleteBulkLoadDirectory() throws IOException {
     }
   }

-  protected void convertWALsToHFiles() throws IOException {
+  protected void convertWALsToHFiles(List<ImmutableBytesWritable> splits) throws IOException {
     // get incremental backup file list and prepare parameters for DistCp
     List<String> incrBackupFileList = backupInfo.getIncrBackupFileList();
     // Get list of tables in incremental backup set
@@ -375,7 +424,7 @@ protected void convertWALsToHFiles() throws IOException {
         LOG.warn("Table " + table + " does not exist. Skipping in WAL converter");
       }
     }
-    walToHFiles(incrBackupFileList, tableList);
+    walToHFiles(incrBackupFileList, tableList, splits);

   }

@@ -385,8 +434,9 @@ protected boolean tableExists(TableName table, Connection conn) throws IOException {
     }
   }

-  protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException {
-    Tool player = new WALPlayer();
+  protected void walToHFiles(List<String> dirPaths, List<String> tableList,
+    List<ImmutableBytesWritable> splits) throws IOException {
+    WALPlayer player = new WALPlayer();

     // Player reads all files in arbitrary directory structure and creates
     // a Map task for each file. We use ';' as separator
@@ -401,6 +451,7 @@ protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException {
     conf.set(JOB_NAME_CONF_KEY, jobname);
     String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };

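+    // Pass the split points gathered from the last full backup's regions to WALPlayer,
+    // presumably so its HFile output is partitioned along those same region boundaries
+    // (the setSplits hook itself is not shown in this hunk).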
+    player.setSplits(splits);
     try {
       player.setConf(conf);
       int result = player.run(playerArgs);