Skip to content

Commit 4ea7922

Browse files
bsglz authored and wchevreuil committed
HBASE-23223 Support the offsetLock of bucketCache to use strong ref (#764)
Signed-off-by: Wellington Chevreuil <[email protected]>
1 parent 77b4e8c commit 4ea7922

File tree

7 files changed

+237
-89
lines changed

7 files changed

+237
-89
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,9 @@
7272
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
7373
import org.apache.hadoop.hbase.util.HasThread;
7474
import org.apache.hadoop.hbase.util.IdReadWriteLock;
75-
import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
75+
import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef;
76+
import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;
77+
import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool.ReferenceType;
7678
import org.apache.hadoop.util.StringUtils;
7779
import org.apache.yetus.audience.InterfaceAudience;
7880
import org.slf4j.Logger;
@@ -113,6 +115,10 @@ public class BucketCache implements BlockCache, HeapSize {
113115
static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
114116
static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
115117

118+
/** Use strong reference for offsetLock or not */
119+
private static final String STRONG_REF_KEY = "hbase.bucketcache.offsetlock.usestrongref";
120+
private static final boolean STRONG_REF_DEFAULT = false;
121+
116122
/** Priority buckets */
117123
@VisibleForTesting
118124
static final float DEFAULT_SINGLE_FACTOR = 0.25f;
@@ -199,10 +205,9 @@ public class BucketCache implements BlockCache, HeapSize {
199205
* A ReentrantReadWriteLock to lock on a particular block identified by offset.
200206
* The purpose of this is to avoid freeing the block which is being read.
201207
* <p>
202-
* Key set of offsets in BucketCache is limited so soft reference is the best choice here.
203208
*/
204209
@VisibleForTesting
205-
transient final IdReadWriteLock<Long> offsetLock = new IdReadWriteLock<>(ReferenceType.SOFT);
210+
transient final IdReadWriteLock<Long> offsetLock;
206211

207212
private final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
208213
int nameComparison = a.getHfileName().compareTo(b.getHfileName());
@@ -257,6 +262,12 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck
257262
public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
258263
int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration,
259264
Configuration conf) throws IOException {
265+
boolean useStrongRef = conf.getBoolean(STRONG_REF_KEY, STRONG_REF_DEFAULT);
266+
if (useStrongRef) {
267+
this.offsetLock = new IdReadWriteLockStrongRef<>();
268+
} else {
269+
this.offsetLock = new IdReadWriteLockWithObjectPool<>(ReferenceType.SOFT);
270+
}
260271
this.algorithm = conf.get(FILE_VERIFY_ALGORITHM, DEFAULT_FILE_VERIFY_ALGORITHM);
261272
this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath);
262273
this.writerThreads = new WriterThread[writerThreadNum];
@@ -277,7 +288,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck
277288

278289
LOG.info("Instantiating BucketCache with acceptableFactor: " + acceptableFactor + ", minFactor: " + minFactor +
279290
", extraFreeFactor: " + extraFreeFactor + ", singleFactor: " + singleFactor + ", multiFactor: " + multiFactor +
280-
", memoryFactor: " + memoryFactor);
291+
", memoryFactor: " + memoryFactor + ", useStrongRef: " + useStrongRef);
281292

282293
this.cacheCapacity = capacity;
283294
this.persistencePath = persistencePath;

hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
import org.apache.hadoop.hbase.client.TableState;
3737
import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
3838
import org.apache.hadoop.hbase.util.IdReadWriteLock;
39+
import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;
3940
import org.apache.hadoop.hbase.util.ZKDataMigrator;
4041
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
4142
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -62,7 +63,7 @@ public class TableStateManager {
6263
private static final String MIGRATE_TABLE_STATE_FROM_ZK_KEY =
6364
"hbase.migrate.table.state.from.zookeeper";
6465

65-
private final IdReadWriteLock<TableName> tnLock = new IdReadWriteLock<>();
66+
private final IdReadWriteLock<TableName> tnLock = new IdReadWriteLockWithObjectPool<>();
6667
protected final MasterServices master;
6768

6869
private final ConcurrentMap<TableName, TableState.State> tableName2State =

hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdReadWriteLock.java

Lines changed: 3 additions & 77 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,9 @@
1818
*/
1919
package org.apache.hadoop.hbase.util;
2020

21-
import java.lang.ref.Reference;
2221
import java.util.concurrent.locks.ReentrantReadWriteLock;
2322

2423
import org.apache.yetus.audience.InterfaceAudience;
25-
2624
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
2725

2826
/**
@@ -42,80 +40,13 @@
4240
* For write lock, use lock.writeLock()
4341
*/
4442
@InterfaceAudience.Private
45-
public class IdReadWriteLock<T> {
46-
// The number of lock we want to easily support. It's not a maximum.
47-
private static final int NB_CONCURRENT_LOCKS = 1000;
48-
/**
49-
* The pool to get entry from, entries are mapped by {@link Reference} and will be automatically
50-
* garbage-collected by JVM
51-
*/
52-
private final ObjectPool<T, ReentrantReadWriteLock> lockPool;
53-
private final ReferenceType refType;
54-
55-
public IdReadWriteLock() {
56-
this(ReferenceType.WEAK);
57-
}
58-
59-
/**
60-
* Constructor of IdReadWriteLock
61-
* @param referenceType type of the reference used in lock pool, {@link ReferenceType#WEAK} by
62-
* default. Use {@link ReferenceType#SOFT} if the key set is limited and the locks will
63-
* be reused with a high frequency
64-
*/
65-
public IdReadWriteLock(ReferenceType referenceType) {
66-
this.refType = referenceType;
67-
switch (referenceType) {
68-
case SOFT:
69-
lockPool = new SoftObjectPool<>(new ObjectPool.ObjectFactory<T, ReentrantReadWriteLock>() {
70-
@Override
71-
public ReentrantReadWriteLock createObject(T id) {
72-
return new ReentrantReadWriteLock();
73-
}
74-
}, NB_CONCURRENT_LOCKS);
75-
break;
76-
case WEAK:
77-
default:
78-
lockPool = new WeakObjectPool<>(new ObjectPool.ObjectFactory<T, ReentrantReadWriteLock>() {
79-
@Override
80-
public ReentrantReadWriteLock createObject(T id) {
81-
return new ReentrantReadWriteLock();
82-
}
83-
}, NB_CONCURRENT_LOCKS);
84-
}
85-
}
86-
87-
public static enum ReferenceType {
88-
WEAK, SOFT
89-
}
90-
91-
/**
92-
* Get the ReentrantReadWriteLock corresponding to the given id
93-
* @param id an arbitrary number to identify the lock
94-
*/
95-
public ReentrantReadWriteLock getLock(T id) {
96-
lockPool.purge();
97-
ReentrantReadWriteLock readWriteLock = lockPool.get(id);
98-
return readWriteLock;
99-
}
100-
101-
/** For testing */
102-
@VisibleForTesting
103-
int purgeAndGetEntryPoolSize() {
104-
gc();
105-
Threads.sleep(200);
106-
lockPool.purge();
107-
return lockPool.size();
108-
}
109-
110-
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional")
111-
private void gc() {
112-
System.gc();
113-
}
43+
public abstract class IdReadWriteLock<T> {
44+
public abstract ReentrantReadWriteLock getLock(T id);
11445

11546
@VisibleForTesting
11647
public void waitForWaiters(T id, int numWaiters) throws InterruptedException {
11748
for (ReentrantReadWriteLock readWriteLock;;) {
118-
readWriteLock = lockPool.get(id);
49+
readWriteLock = getLock(id);
11950
if (readWriteLock != null) {
12051
synchronized (readWriteLock) {
12152
if (readWriteLock.getQueueLength() >= numWaiters) {
@@ -126,9 +57,4 @@ public void waitForWaiters(T id, int numWaiters) throws InterruptedException {
12657
Thread.sleep(50);
12758
}
12859
}
129-
130-
@VisibleForTesting
131-
public ReferenceType getReferenceType() {
132-
return this.refType;
133-
}
13460
}
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
/*
2+
*
3+
* Licensed to the Apache Software Foundation (ASF) under one
4+
* or more contributor license agreements. See the NOTICE file
5+
* distributed with this work for additional information
6+
* regarding copyright ownership. The ASF licenses this file
7+
* to you under the Apache License, Version 2.0 (the
8+
* "License"); you may not use this file except in compliance
9+
* with the License. You may obtain a copy of the License at
10+
*
11+
* http://www.apache.org/licenses/LICENSE-2.0
12+
*
13+
* Unless required by applicable law or agreed to in writing, software
14+
* distributed under the License is distributed on an "AS IS" BASIS,
15+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
* See the License for the specific language governing permissions and
17+
* limitations under the License.
18+
*/
19+
package org.apache.hadoop.hbase.util;
20+
21+
import java.util.concurrent.ConcurrentHashMap;
22+
import java.util.concurrent.locks.ReentrantReadWriteLock;
23+
24+
import org.apache.yetus.audience.InterfaceAudience;
25+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
26+
27+
@InterfaceAudience.Private
28+
public class IdReadWriteLockStrongRef<T> extends IdReadWriteLock<T> {
29+
30+
final ConcurrentHashMap<T, ReentrantReadWriteLock> map = new ConcurrentHashMap<>();
31+
32+
@VisibleForTesting
33+
@Override
34+
public ReentrantReadWriteLock getLock(T id) {
35+
ReentrantReadWriteLock existing = map.get(id);
36+
if (existing != null) {
37+
return existing;
38+
}
39+
40+
ReentrantReadWriteLock newLock = new ReentrantReadWriteLock();
41+
existing = map.putIfAbsent(id, newLock);
42+
if (existing == null) {
43+
return newLock;
44+
} else {
45+
return existing;
46+
}
47+
}
48+
49+
}
Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
/*
2+
*
3+
* Licensed to the Apache Software Foundation (ASF) under one
4+
* or more contributor license agreements. See the NOTICE file
5+
* distributed with this work for additional information
6+
* regarding copyright ownership. The ASF licenses this file
7+
* to you under the Apache License, Version 2.0 (the
8+
* "License"); you may not use this file except in compliance
9+
* with the License. You may obtain a copy of the License at
10+
*
11+
* http://www.apache.org/licenses/LICENSE-2.0
12+
*
13+
* Unless required by applicable law or agreed to in writing, software
14+
* distributed under the License is distributed on an "AS IS" BASIS,
15+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
* See the License for the specific language governing permissions and
17+
* limitations under the License.
18+
*/
19+
package org.apache.hadoop.hbase.util;
20+
21+
import java.lang.ref.Reference;
22+
import java.util.concurrent.locks.ReentrantReadWriteLock;
23+
24+
import org.apache.yetus.audience.InterfaceAudience;
25+
26+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
27+
28+
@InterfaceAudience.Private
29+
public class IdReadWriteLockWithObjectPool<T> extends IdReadWriteLock<T>{
30+
// The number of lock we want to easily support. It's not a maximum.
31+
private static final int NB_CONCURRENT_LOCKS = 1000;
32+
/**
33+
* The pool to get entry from, entries are mapped by {@link Reference} and will be automatically
34+
* garbage-collected by JVM
35+
*/
36+
private final ObjectPool<T, ReentrantReadWriteLock> lockPool;
37+
private final ReferenceType refType;
38+
39+
public IdReadWriteLockWithObjectPool() {
40+
this(ReferenceType.WEAK);
41+
}
42+
43+
/**
44+
* Constructor of IdReadWriteLockWithObjectPool
45+
* @param referenceType type of the reference used in lock pool, {@link ReferenceType#WEAK} by
46+
* default. Use {@link ReferenceType#SOFT} if the key set is limited and the locks will
47+
* be reused with a high frequency
48+
*/
49+
public IdReadWriteLockWithObjectPool(ReferenceType referenceType) {
50+
this.refType = referenceType;
51+
switch (referenceType) {
52+
case SOFT:
53+
lockPool = new SoftObjectPool<>(new ObjectPool.ObjectFactory<T, ReentrantReadWriteLock>() {
54+
@Override
55+
public ReentrantReadWriteLock createObject(T id) {
56+
return new ReentrantReadWriteLock();
57+
}
58+
}, NB_CONCURRENT_LOCKS);
59+
break;
60+
case WEAK:
61+
default:
62+
lockPool = new WeakObjectPool<>(new ObjectPool.ObjectFactory<T, ReentrantReadWriteLock>() {
63+
@Override
64+
public ReentrantReadWriteLock createObject(T id) {
65+
return new ReentrantReadWriteLock();
66+
}
67+
}, NB_CONCURRENT_LOCKS);
68+
}
69+
}
70+
71+
public static enum ReferenceType {
72+
WEAK, SOFT
73+
}
74+
75+
/**
76+
* Get the ReentrantReadWriteLock corresponding to the given id
77+
* @param id an arbitrary number to identify the lock
78+
*/
79+
@Override
80+
public ReentrantReadWriteLock getLock(T id) {
81+
lockPool.purge();
82+
ReentrantReadWriteLock readWriteLock = lockPool.get(id);
83+
return readWriteLock;
84+
}
85+
86+
/** For testing */
87+
@VisibleForTesting
88+
int purgeAndGetEntryPoolSize() {
89+
gc();
90+
Threads.sleep(200);
91+
lockPool.purge();
92+
return lockPool.size();
93+
}
94+
95+
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DM_GC", justification="Intentional")
96+
private void gc() {
97+
System.gc();
98+
}
99+
100+
@VisibleForTesting
101+
public ReferenceType getReferenceType() {
102+
return this.refType;
103+
}
104+
}
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hbase.util;
19+
20+
import java.util.concurrent.locks.ReentrantReadWriteLock;
21+
22+
import org.apache.hadoop.hbase.HBaseClassTestRule;
23+
import org.apache.hadoop.hbase.testclassification.SmallTests;
24+
import org.junit.Assert;
25+
import org.junit.ClassRule;
26+
import org.junit.Test;
27+
import org.junit.experimental.categories.Category;
28+
import org.slf4j.Logger;
29+
import org.slf4j.LoggerFactory;
30+
31+
32+
@Category({ SmallTests.class })
33+
public class TestIdReadWriteLockStrongRef {
34+
35+
@ClassRule
36+
public static final HBaseClassTestRule CLASS_RULE =
37+
HBaseClassTestRule.forClass(TestIdReadWriteLockStrongRef.class);
38+
39+
private static final Logger LOG = LoggerFactory.getLogger(TestIdReadWriteLockStrongRef.class);
40+
41+
private IdReadWriteLockStrongRef<Long> idLock = new IdReadWriteLockStrongRef<>();
42+
43+
@Test
44+
public void testGetLock() throws Exception {
45+
Long offset_1 = 1L;
46+
Long offset_2 = 2L;
47+
ReentrantReadWriteLock offsetLock_1 = idLock.getLock(offset_1);
48+
ReentrantReadWriteLock offsetLock_2 = idLock.getLock(offset_1);
49+
Assert.assertEquals(offsetLock_1,offsetLock_2);
50+
ReentrantReadWriteLock offsetLock_3 = idLock.getLock(offset_2);
51+
Assert.assertNotEquals(offsetLock_1,offsetLock_3);
52+
}
53+
54+
}
55+

0 commit comments

Comments
 (0)