
Commit 8bec26e

GeorryHuang authored and joshelser committed
HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker configurations to TableDescriptor for existing tables (#3700)
Signed-off-by: Duo Zhang <[email protected]>
Reviewed-by: Wellington Ramos Chevreuil <[email protected]>
1 parent 08d1171 commit 8bec26e

File tree

4 files changed: +291 -0 lines changed

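At a glance, the change adds a master chore (RollingUpgradeChore) that submits MigrateStoreFileTrackerProcedure for every table whose descriptor does not yet record a StoreFileTracker implementation, so tables created before the SFT feature pick up the setting during a rolling upgrade. As an illustrative sketch that is not part of this commit, client code along these lines could verify afterwards that a table's descriptor carries the value; the table name "t1" is hypothetical, and the key is the StoreFileTrackerFactory.TRACKER_IMPL constant the patch reads and writes:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public class CheckTrackerConfig {
  public static void main(String[] args) throws Exception {
    // Hypothetical table name used purely for illustration.
    TableName table = TableName.valueOf("t1");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      TableDescriptor td = admin.getDescriptor(table);
      // After the RollingUpgradeChore has run, this value should no longer be null/empty.
      String impl = td.getValue(StoreFileTrackerFactory.TRACKER_IMPL);
      System.out.println("Persisted store file tracker impl: " + impl);
    }
  }
}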

hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

Lines changed: 6 additions & 0 deletions
@@ -131,6 +131,7 @@
 import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
 import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
 import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
@@ -376,6 +377,7 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
   private ReplicationBarrierCleaner replicationBarrierCleaner;
   private MobFileCleanerChore mobFileCleanerChore;
   private MobFileCompactionChore mobFileCompactionChore;
+  private RollingUpgradeChore rollingUpgradeChore;
   // used to synchronize the mobCompactionStates
   private final IdLock mobCompactionLock = new IdLock();
   // save the information of mob compactions in tables.
@@ -1222,6 +1224,9 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
       LOG.debug("Balancer post startup initialization complete, took " + (
         (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
     }
+
+    this.rollingUpgradeChore = new RollingUpgradeChore(this);
+    getChoreService().scheduleChore(rollingUpgradeChore);
   }
 
   private void createMissingCFsInMetaDuringUpgrade(
@@ -1713,6 +1718,7 @@ protected void stopChores() {
     shutdownChore(snapshotCleanerChore);
     shutdownChore(hbckChore);
     shutdownChore(regionsRecoveryChore);
+    shutdownChore(rollingUpgradeChore);
   }
 
   /**
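The wiring above follows HMaster's usual chore lifecycle: the chore is created and scheduled once active-master initialization completes, and stopChores() shuts it down if the master stops early. Because RollingUpgradeChore (added below) calls shutdown() on itself once every table has been migrated, it behaves as a one-shot task. A minimal, hypothetical sketch of that self-cancelling pattern, assuming only the ScheduledChore API this commit already uses (the class name and the workIsDone() helper are made up):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical example, not part of this commit: a chore that cancels itself
// once its work is done, mirroring how RollingUpgradeChore behaves.
class OneShotChore extends ScheduledChore {
  OneShotChore(Stoppable stopper) {
    super("OneShotChore", stopper, 10, 30, TimeUnit.SECONDS); // period 10s, initial delay 30s
  }

  @Override
  protected void chore() {
    if (workIsDone()) {
      shutdown(); // removes the chore from the ChoreService; it will not run again
    }
  }

  private boolean workIsDone() {
    return true; // placeholder condition for illustration
  }
}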
hbase-server/src/main/java/org/apache/hadoop/hbase/master/migrate/RollingUpgradeChore.java

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.master.migrate;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.MigrateStoreFileTrackerProcedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * To avoid submitting too many migration/upgrade threads at the same time during master
 * initialization, RollingUpgradeChore handles all rolling-upgrade tasks.
 */
@InterfaceAudience.Private
public class RollingUpgradeChore extends ScheduledChore {

  static final String ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY =
    "hbase.master.rolling.upgrade.chore.period.secs";
  static final int DFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS = 10; // 10 seconds by default

  static final String ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY =
    "hbase.master.rolling.upgrade.chore.delay.secs";
  static final long DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS = 30; // 30 seconds

  static final int CONCURRENT_PROCEDURES_COUNT = 5;

  private final static Logger LOG = LoggerFactory.getLogger(RollingUpgradeChore.class);
  ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
  private TableDescriptors tableDescriptors;
  private List<MigrateStoreFileTrackerProcedure> processingProcs = new ArrayList<>();

  public RollingUpgradeChore(MasterServices masterServices) {
    this(masterServices.getConfiguration(), masterServices.getMasterProcedureExecutor(),
      masterServices.getTableDescriptors(), masterServices);
  }

  private RollingUpgradeChore(Configuration conf,
    ProcedureExecutor<MasterProcedureEnv> procedureExecutor, TableDescriptors tableDescriptors,
    Stoppable stopper) {
    super(RollingUpgradeChore.class.getSimpleName(), stopper,
      conf.getInt(ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY,
        DFAULT_ROLLING_UPGRADE_CHORE_PERIOD_SECONDS),
      conf.getLong(ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY,
        DEFAULT_ROLLING_UPGRADE_CHORE_DELAY_SECONDS),
      TimeUnit.SECONDS);
    this.procedureExecutor = procedureExecutor;
    this.tableDescriptors = tableDescriptors;
  }

  @Override
  protected void chore() {
    if (isCompletelyMigrateSFT(CONCURRENT_PROCEDURES_COUNT)) {
      LOG.info("All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!");
      shutdown();
    }
  }

  private boolean isCompletelyMigrateSFT(int concurrentCount) {
    Iterator<MigrateStoreFileTrackerProcedure> iter = processingProcs.iterator();
    while (iter.hasNext()) {
      MigrateStoreFileTrackerProcedure proc = iter.next();
      if (procedureExecutor.isFinished(proc.getProcId())) {
        iter.remove();
      }
    }
    // No new migration procedures will be submitted until
    // all procedures executed last time are completed.
    if (!processingProcs.isEmpty()) {
      return false;
    }

    Map<String, TableDescriptor> migrateSFTTables;
    try {
      migrateSFTTables = tableDescriptors.getAll().entrySet().stream().filter(entry -> {
        TableDescriptor td = entry.getValue();
        return StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
      }).limit(concurrentCount).collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
    } catch (IOException e) {
      LOG.warn("Failed to migrate StoreFileTracker", e);
      return false;
    }

    if (migrateSFTTables.isEmpty()) {
      LOG.info("There is no table to migrate StoreFileTracker!");
      return true;
    }

    for (Map.Entry<String, TableDescriptor> entry : migrateSFTTables.entrySet()) {
      TableDescriptor tableDescriptor = entry.getValue();
      MigrateStoreFileTrackerProcedure proc =
        new MigrateStoreFileTrackerProcedure(procedureExecutor.getEnvironment(), tableDescriptor);
      procedureExecutor.submitProcedure(proc);
      processingProcs.add(proc);
    }
    return false;
  }
}
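Two things worth noting about the chore above: it submits at most CONCURRENT_PROCEDURES_COUNT (5) MigrateStoreFileTrackerProcedures per run and will not select new tables until that batch has finished, and its schedule is driven by the two configuration keys declared at the top of the class (10 second period and 30 second initial delay by default). A minimal sketch of tightening those values, much as the test later in this commit does; the class and method names here are made up for illustration and the values are arbitrary:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TuneRollingUpgradeChore {
  public static Configuration createTunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values; the defaults are a 10 second period and a 30 second initial delay.
    conf.setInt("hbase.master.rolling.upgrade.chore.period.secs", 1);
    conf.setLong("hbase.master.rolling.upgrade.chore.delay.secs", 5);
    return conf;
  }
}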
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/MigrateStoreFileTrackerProcedure.java

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver.storefiletracker;

import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Procedure for migrating StoreFileTracker information to table descriptor.
 */
@InterfaceAudience.Private
public class MigrateStoreFileTrackerProcedure extends ModifyTableDescriptorProcedure {

  public MigrateStoreFileTrackerProcedure() {}

  public MigrateStoreFileTrackerProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env, unmodified);
  }

  @Override
  protected Optional<TableDescriptor> modify(MasterProcedureEnv env, TableDescriptor current) {
    if (StringUtils.isEmpty(current.getValue(StoreFileTrackerFactory.TRACKER_IMPL))) {
      TableDescriptor td =
        StoreFileTrackerFactory.updateWithTrackerConfigs(env.getMasterConfiguration(), current);
      return Optional.of(td);
    }
    return Optional.empty();
  }
}
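The procedure relies entirely on the modify() hook of ModifyTableDescriptorProcedure: returning a descriptor causes the framework to persist the updated descriptor, while Optional.empty() turns the procedure into a no-op for tables that already carry the value. A hypothetical sketch of the same contract, not part of this commit (the class name and the key/value pair are invented), showing how a subclass could stamp any missing default into a descriptor:

import java.util.Optional;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyTableDescriptorProcedure;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;

// Hypothetical illustration of the ModifyTableDescriptorProcedure contract used above;
// the class name and the key/value pair are made up for the example.
@InterfaceAudience.Private
public class EnsureDescriptorValueProcedure extends ModifyTableDescriptorProcedure {

  private static final String KEY = "example.descriptor.key";    // hypothetical key
  private static final String VALUE = "example-default-value";   // hypothetical default

  public EnsureDescriptorValueProcedure() {}

  public EnsureDescriptorValueProcedure(MasterProcedureEnv env, TableDescriptor unmodified) {
    super(env, unmodified);
  }

  @Override
  protected Optional<TableDescriptor> modify(MasterProcedureEnv env, TableDescriptor current) {
    if (StringUtils.isEmpty(current.getValue(KEY))) {
      // Descriptor lacks the value: return an updated copy so the procedure persists it.
      return Optional.of(TableDescriptorBuilder.newBuilder(current).setValue(KEY, VALUE).build());
    }
    // Already present: nothing to do for this table.
    return Optional.empty();
  }
}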
hbase-server/src/test/java/org/apache/hadoop/hbase/master/migrate/TestMigrateStoreFileTracker.java

Lines changed: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.migrate;

import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestMigrateStoreFileTracker {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMigrateStoreFileTracker.class);
  private final static String[] tables = new String[] { "t1", "t2", "t3", "t4", "t5", "t6" };
  private final static String famStr = "f1";
  private final static byte[] fam = Bytes.toBytes(famStr);

  private HBaseTestingUtil HTU;
  private Configuration conf;
  private TableDescriptor tableDescriptor;

  @Before
  public void setUp() throws Exception {
    conf = HBaseConfiguration.create();
    // Speed up the launch of RollingUpgradeChore
    conf.setInt(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_PERIOD_SECONDS_KEY, 1);
    conf.setLong(RollingUpgradeChore.ROLLING_UPGRADE_CHORE_DELAY_SECONDS_KEY, 1);
    HTU = new HBaseTestingUtil(conf);
    HTU.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testMigrateStoreFileTracker() throws IOException, InterruptedException {
    // Create tables to test
    for (int i = 0; i < tables.length; i++) {
      tableDescriptor = HTU.createModifyableTableDescriptor(tables[i])
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam).build()).build();
      HTU.createTable(tableDescriptor, null);
    }
    TableDescriptors tableDescriptors = HTU.getMiniHBaseCluster().getMaster().getTableDescriptors();
    for (int i = 0; i < tables.length; i++) {
      TableDescriptor tdAfterCreated = tableDescriptors.get(TableName.valueOf(tables[i]));
      // Make sure that TRACKER_IMPL was set by default after tables have been created.
      Assert.assertNotNull(tdAfterCreated.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
      // Remove the StoreFileTracker impl from the table descriptor
      TableDescriptor tdRemovedSFT = TableDescriptorBuilder.newBuilder(tdAfterCreated)
        .removeValue(StoreFileTrackerFactory.TRACKER_IMPL).build();
      tableDescriptors.update(tdRemovedSFT);
    }
    HTU.getMiniHBaseCluster().stopMaster(0).join();
    HTU.getMiniHBaseCluster().startMaster();
    HTU.getMiniHBaseCluster().waitForActiveAndReadyMaster(30000);
    // Wait until all tables have been migrated
    TableDescriptors tds = HTU.getMiniHBaseCluster().getMaster().getTableDescriptors();
    HTU.waitFor(30000, () -> {
      try {
        for (int i = 0; i < tables.length; i++) {
          TableDescriptor td = tds.get(TableName.valueOf(tables[i]));
          if (StringUtils.isEmpty(td.getValue(StoreFileTrackerFactory.TRACKER_IMPL))) {
            return false;
          }
        }
        return true;
      } catch (IOException e) {
        return false;
      }
    });
  }
}
