From 62dab60b3e2458fae1f766d8b33daa15997cb8fd Mon Sep 17 00:00:00 2001 From: Peter Hofer Date: Tue, 25 Feb 2025 19:37:19 +0100 Subject: [PATCH 1/4] Disable periodic full GCs for compacting GC. --- .../oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java index d53ad5a9c5ba..3a4d7452bb1e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AdaptiveCollectionPolicy.java @@ -189,7 +189,8 @@ public boolean shouldCollectCompletely(boolean followingIncrementalCollection) { return false; } - if (minorCountSinceMajorCollection * avgMinorPause.getAverage() >= CONSECUTIVE_MINOR_TO_MAJOR_COLLECTION_PAUSE_TIME_RATIO * avgMajorPause.getPaddedAverage()) { + if (!SerialGCOptions.useCompactingOldGen() && + minorCountSinceMajorCollection * avgMinorPause.getAverage() >= CONSECUTIVE_MINOR_TO_MAJOR_COLLECTION_PAUSE_TIME_RATIO * avgMajorPause.getPaddedAverage()) { /* * When we do many incremental collections in a row because they reclaim sufficient * space, still trigger a complete collection when reaching a cumulative pause time From 05ac3b3abe7ed938c9bfb685a6e0df67e61b456c Mon Sep 17 00:00:00 2001 From: Peter Hofer Date: Wed, 26 Feb 2025 18:33:24 +0100 Subject: [PATCH 2/4] Minor fixes to comments. --- .../oracle/svm/core/genscavenge/AbstractCollectionPolicy.java | 2 +- .../oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java | 2 +- .../src/com/oracle/svm/core/genscavenge/remset/BrickTable.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java index 24fea741df28..8e689e0a2ea3 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/AbstractCollectionPolicy.java @@ -297,7 +297,7 @@ public UnsignedWord getMaximumFreeAlignedChunksSize() { /* * Keep chunks ready for allocations in eden and for the survivor to-spaces during young * collections (although we might keep too many aligned chunks when large objects in - * unallocated chunks are also allocated). We could alternatively return + * unaligned chunks are also allocated). We could alternatively return * getCurrentHeapCapacity() to have chunks ready during full GCs as well. 
*/ UnsignedWord total = edenSize.add(survivorSize); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java index 5ed700cc0b54..e60f16206e4b 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ReciprocalLeastSquareFit.java @@ -25,7 +25,7 @@ package com.oracle.svm.core.genscavenge; /** - * Least squares fitting on a data set to generate an equation y = b + a / x. Uses exponential decay + * Least squares fitting on a data set to generate an equation y = a + b / x. Uses exponential decay * to assign a higher weight to newly added data points and effectively drop old data points without * keeping a history. * diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java index e6e61c8d3f05..9184a4e91645 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/BrickTable.java @@ -40,7 +40,7 @@ /** * Inspired by the .NET CoreCLR GC, the {@link BrickTable} speeds up lookups of new object locations * after compaction by acting as a lookup table for {@link ObjectMoveInfo} structures. Each entry - * stores a pointer to the start of the first such structure for the fraction of the chunk that it + * stores the offset of the start of the first such structure for the fraction of the chunk that it * covers. It borrows the memory of a chunk's {@link CardTable}. */ public final class BrickTable { From 762ce84420ed63a5f4560a264b548c80bd4f1c0e Mon Sep 17 00:00:00 2001 From: Peter Hofer Date: Tue, 21 May 2024 22:02:30 +0200 Subject: [PATCH 3/4] Populate first object table during planning. --- .../genscavenge/CompactingOldGeneration.java | 2 +- .../oracle/svm/core/genscavenge/Space.java | 2 +- .../compacting/PlanningVisitor.java | 65 +++++++++++++------ .../remset/AlignedChunkRememberedSet.java | 22 +++---- .../genscavenge/remset/FirstObjectTable.java | 9 +-- 5 files changed, 64 insertions(+), 36 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java index d925a91b27df..3c723dd15cda 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java @@ -407,7 +407,7 @@ private void compact(Timers timers) { * chunk during compaction. The remembered set bits are already set after planning. 
*/ if (!AlignedHeapChunk.isEmpty(chunk)) { - RememberedSet.get().enableRememberedSetForChunk(chunk); + RememberedSet.get().clearRememberedSet(chunk); } // empty chunks will be freed or reset before reuse, no need to reinitialize here chunk = HeapChunk.getNext(chunk); diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index 679701947b59..134db7aa3110 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -449,7 +449,7 @@ private Object copyAlignedObject(Object originalObj) { if (SerialGCOptions.useCompactingOldGen() && GCImpl.getGCImpl().isCompleteCollection()) { /* * In a compacting complete collection, the remembered set bit is set already during - * marking and the first object table is built later during compaction. + * marking and the first object table is built during planning. */ } else { /* diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java index 5d3c0ff28f8b..64cc8e4c3da9 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java @@ -34,7 +34,9 @@ import com.oracle.svm.core.genscavenge.HeapChunk; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; import com.oracle.svm.core.genscavenge.Space; +import com.oracle.svm.core.genscavenge.remset.AlignedChunkRememberedSet; import com.oracle.svm.core.genscavenge.remset.BrickTable; +import com.oracle.svm.core.genscavenge.remset.FirstObjectTable; import com.oracle.svm.core.hub.LayoutEncoding; import jdk.graal.compiler.word.Word; @@ -56,6 +58,9 @@ public PlanningVisitor() { public void init(Space space) { allocChunk = space.getFirstAlignedHeapChunk(); allocPointer = AlignedHeapChunk.getObjectsStart(allocChunk); + if (!allocChunk.getShouldSweepInsteadOfCompact()) { + FirstObjectTable.initializeTable(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), AlignedChunkRememberedSet.getFirstObjectTableSize()); + } } @Override @@ -69,7 +74,7 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { UnsignedWord brickIndex = Word.zero(); /* Initialize the move info structure at the chunk's object start location. */ - ObjectMoveInfo.setNewAddress(objSeq, allocPointer); + ObjectMoveInfo.setNewAddress(objSeq, objSeq); ObjectMoveInfo.setObjectSeqSize(objSeq, Word.zero()); ObjectMoveInfo.setNextObjectSeqOffset(objSeq, Word.zero()); @@ -115,12 +120,43 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { } objSeqSize = objSeqSize.add(objSize); + if (!sweeping) { + if (allocPointer.add(objSeqSize).aboveThan(AlignedHeapChunk.getObjectsEnd(allocChunk))) { + /* Out of space, move to the start of the next chunk. 
*/ + allocChunk = HeapChunk.getNext(allocChunk); + assert allocChunk.isNonNull(); + assert !allocChunk.getShouldSweepInsteadOfCompact(); + allocPointer = AlignedHeapChunk.getObjectsStart(allocChunk); + + /* + * TODO: we should reset the FOT entries we already wrote in the last chunk + * (but they should not be accessed, not even by heap verification) + */ + + /* Visit previous objects in sequence again to write new FOT entries. */ + FirstObjectTable.initializeTable(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), AlignedChunkRememberedSet.getFirstObjectTableSize()); + Pointer q = objSeq; + while (q.notEqual(p)) { + UnsignedWord offset = q.subtract(objSeq); + UnsignedWord size = LayoutEncoding.getSizeFromObjectInlineInGC(q.toObject()); + FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), offset, offset.add(size)); + q = q.add(size); + } + } + + Pointer allocEndOffset = allocPointer.add(objSeqSize).subtract(AlignedHeapChunk.getObjectsStart(allocChunk)); + FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), allocEndOffset.subtract(objSize), allocEndOffset); + } } else { // not marked, i.e. not alive and start of a gap of yet unknown size if (objSeqSize.notEqual(0)) { // end of an object sequence - Pointer newAddress = sweeping ? objSeq : allocate(objSeqSize); - ObjectMoveInfo.setNewAddress(objSeq, newAddress); ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); + if (sweeping) { + ObjectMoveInfo.setNewAddress(objSeq, objSeq); + } else { + ObjectMoveInfo.setNewAddress(objSeq, allocPointer); + allocPointer = allocPointer.add(objSeqSize); // ensured enough memory above + } objSeqSize = Word.zero(); @@ -139,10 +175,15 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { if (gapSize.notEqual(0)) { // truncate gap at chunk end chunk.setTopOffset(chunk.getTopOffset().subtract(gapSize)); + } else if (objSeqSize.notEqual(0)) { - Pointer newAddress = sweeping ? 
objSeq : allocate(objSeqSize); - ObjectMoveInfo.setNewAddress(objSeq, newAddress); ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); + if (sweeping) { + ObjectMoveInfo.setNewAddress(objSeq, objSeq); + } else { + ObjectMoveInfo.setNewAddress(objSeq, allocPointer); + allocPointer = allocPointer.add(objSeqSize); // ensured enough memory above + } } if (sweeping) { @@ -168,18 +209,4 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { return true; } - - private Pointer allocate(UnsignedWord size) { - Pointer p = allocPointer; - allocPointer = allocPointer.add(size); - if (allocPointer.aboveThan(AlignedHeapChunk.getObjectsEnd(allocChunk))) { - allocChunk = HeapChunk.getNext(allocChunk); - assert allocChunk.isNonNull(); - assert !allocChunk.getShouldSweepInsteadOfCompact(); - - p = AlignedHeapChunk.getObjectsStart(allocChunk); - allocPointer = p.add(size); - } - return p; - } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java index 05bae0b8338a..4b279660c240 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/AlignedChunkRememberedSet.java @@ -53,7 +53,7 @@ import jdk.graal.compiler.replacements.nodes.AssertionNode; import jdk.graal.compiler.word.Word; -final class AlignedChunkRememberedSet { +public final class AlignedChunkRememberedSet { private AlignedChunkRememberedSet() { } @@ -63,7 +63,7 @@ public static int wordSize() { } @Fold - public static UnsignedWord getHeaderSize() { + static UnsignedWord getHeaderSize() { UnsignedWord headerSize = getFirstObjectTableLimitOffset(); if (SerialGCOptions.useCompactingOldGen()) { // Compaction needs room for a ObjectMoveInfo structure before the first object. @@ -74,7 +74,7 @@ public static UnsignedWord getHeaderSize() { } @Platforms(Platform.HOSTED_ONLY.class) - public static void enableRememberedSet(HostedByteBufferPointer chunk, int chunkPosition, List objects) { + static void enableRememberedSet(HostedByteBufferPointer chunk, int chunkPosition, List objects) { // Completely clean the card table and the first object table. CardTable.cleanTable(getCardTableStart(chunk), getCardTableSize()); FirstObjectTable.initializeTable(getFirstObjectTableStart(chunk), getFirstObjectTableSize()); @@ -94,7 +94,7 @@ public static void enableRememberedSet(HostedByteBufferPointer chunk, int chunkP @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void enableRememberedSetForObject(AlignedHeader chunk, Object obj, UnsignedWord objSize) { + static void enableRememberedSetForObject(AlignedHeader chunk, Object obj, UnsignedWord objSize) { Pointer fotStart = getFirstObjectTableStart(chunk); Pointer objectsStart = AlignedHeapChunk.getObjectsStart(chunk); @@ -107,7 +107,7 @@ public static void enableRememberedSetForObject(AlignedHeader chunk, Object obj, } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void enableRememberedSet(AlignedHeader chunk) { + static void enableRememberedSet(AlignedHeader chunk) { // Completely clean the card table and the first object table as further objects may be // added later on to this chunk. 
CardTable.cleanTable(getCardTableStart(chunk), getCardTableSize()); @@ -124,7 +124,7 @@ public static void enableRememberedSet(AlignedHeader chunk) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void clearRememberedSet(AlignedHeader chunk) { + static void clearRememberedSet(AlignedHeader chunk) { CardTable.cleanTable(getCardTableStart(chunk), getCardTableSize()); } @@ -133,7 +133,7 @@ public static void clearRememberedSet(AlignedHeader chunk) { * the post-write barrier. */ @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void dirtyCardForObject(Object object, boolean verifyOnly) { + static void dirtyCardForObject(Object object, boolean verifyOnly) { Pointer objectPointer = Word.objectToUntrackedPointer(object); AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(objectPointer); Pointer cardTableStart = getCardTableStart(chunk); @@ -146,7 +146,7 @@ public static void dirtyCardForObject(Object object, boolean verifyOnly) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void walkDirtyObjects(AlignedHeader chunk, UninterruptibleObjectVisitor visitor, boolean clean) { + static void walkDirtyObjects(AlignedHeader chunk, UninterruptibleObjectVisitor visitor, boolean clean) { Pointer objectsStart = AlignedHeapChunk.getObjectsStart(chunk); Pointer objectsLimit = HeapChunk.getTopPointer(chunk); UnsignedWord memorySize = objectsLimit.subtract(objectsStart); @@ -212,7 +212,7 @@ private static void walkObjects(AlignedHeader chunk, Pointer start, Pointer end, } } - public static boolean verify(AlignedHeader chunk) { + static boolean verify(AlignedHeader chunk) { boolean success = true; success &= CardTable.verify(getCardTableStart(chunk), getCardTableEnd(chunk), AlignedHeapChunk.getObjectsStart(chunk), HeapChunk.getTopPointer(chunk)); success &= FirstObjectTable.verify(getFirstObjectTableStart(chunk), AlignedHeapChunk.getObjectsStart(chunk), HeapChunk.getTopPointer(chunk)); @@ -242,7 +242,7 @@ static UnsignedWord getCardTableSize() { } @Fold - static UnsignedWord getFirstObjectTableSize() { + public static UnsignedWord getFirstObjectTableSize() { return getCardTableSize(); } @@ -294,7 +294,7 @@ private static Pointer getCardTableEnd(AlignedHeader chunk) { } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - private static Pointer getFirstObjectTableStart(AlignedHeader chunk) { + public static Pointer getFirstObjectTableStart(AlignedHeader chunk) { return getFirstObjectTableStart(HeapChunk.asPointer(chunk)); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/FirstObjectTable.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/FirstObjectTable.java index da526e240466..204baa696015 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/FirstObjectTable.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/FirstObjectTable.java @@ -24,7 +24,6 @@ */ package com.oracle.svm.core.genscavenge.remset; -import jdk.graal.compiler.word.Word; import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; @@ -37,6 +36,8 @@ import com.oracle.svm.core.log.Log; import com.oracle.svm.core.util.UnsignedUtils; +import jdk.graal.compiler.word.Word; + /** * A "first object table" to tell me the start of the 
first object that crosses onto a card * remembered set memory region. @@ -109,7 +110,7 @@ *
* Implementation note: Table entries are bytes but converted to and from ints with bounds checks. */ -final class FirstObjectTable { +public final class FirstObjectTable { /** * The number of bytes of memory covered by an entry. Since the indexes into the CardTable are * used to index into the FirstObjectTable, these need to have the same value. @@ -237,7 +238,7 @@ public static void setTableForObject(Pointer table, UnsignedWord startOffset, Un */ @AlwaysInline("GC performance") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static Pointer getFirstObjectImprecise(Pointer tableStart, Pointer objectsStart, UnsignedWord index) { + static Pointer getFirstObjectImprecise(Pointer tableStart, Pointer objectsStart, UnsignedWord index) { Pointer result; Pointer firstObject = getFirstObject(tableStart, objectsStart, index); Pointer indexedMemoryStart = objectsStart.add(indexToMemoryOffset(index)); @@ -295,7 +296,7 @@ private static UnsignedWord entryToMemoryOffset(UnsignedWord index, int entry) { return indexOffset.subtract(entryOffset); } - public static boolean verify(Pointer tableStart, Pointer objectsStart, Pointer objectsLimit) { + static boolean verify(Pointer tableStart, Pointer objectsStart, Pointer objectsLimit) { UnsignedWord indexLimit = getTableSizeForMemoryRange(objectsStart, objectsLimit); for (UnsignedWord index = Word.unsigned(0); index.belowThan(indexLimit); index = index.add(1)) { Pointer objStart = getFirstObject(tableStart, objectsStart, index); From 1c31464509675da67ad7f96032a7500bc2022bdc Mon Sep 17 00:00:00 2001 From: Peter Hofer Date: Tue, 21 May 2024 23:29:21 +0200 Subject: [PATCH 4/4] Populate first object table during fix-up instead. --- .../genscavenge/CompactingOldGeneration.java | 12 +--- .../oracle/svm/core/genscavenge/Space.java | 2 +- .../compacting/ObjectMoveInfo.java | 41 +++++++++--- .../compacting/PlanningVisitor.java | 63 ++++++------------- .../compacting/SweepingVisitor.java | 13 ++-- 5 files changed, 65 insertions(+), 66 deletions(-) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java index 3c723dd15cda..48ae7b3663e0 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/CompactingOldGeneration.java @@ -284,7 +284,7 @@ private void fixupReferencesBeforeCompaction(ChunkReleaser chunkReleaser, Timers try { AlignedHeapChunk.AlignedHeader aChunk = space.getFirstAlignedHeapChunk(); while (aChunk.isNonNull()) { - ObjectMoveInfo.walkObjects(aChunk, fixupVisitor); + ObjectMoveInfo.walkObjectsForFixup(aChunk, fixupVisitor); aChunk = HeapChunk.getNext(aChunk); } } finally { @@ -396,16 +396,10 @@ private void compact(Timers timers) { Timer oldCompactionRememberedSetsTimer = timers.oldCompactionRememberedSets.open(); try { + // Clear the card tables (which currently contain brick tables). + // The first-object tables have already been populated. chunk = space.getFirstAlignedHeapChunk(); while (chunk.isNonNull()) { - /* - * Clears the card table (which currently contains the brick table) and updates the - * first object table. 
- * - * GR-54022: we should be able to avoid this pass and build the first object tables - * during planning and reset card tables once we detect that we are finished with a - * chunk during compaction. The remembered set bits are already set after planning. - */ if (!AlignedHeapChunk.isEmpty(chunk)) { RememberedSet.get().clearRememberedSet(chunk); } // empty chunks will be freed or reset before reuse, no need to reinitialize here diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java index 134db7aa3110..4b74575fe3de 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/Space.java @@ -449,7 +449,7 @@ private Object copyAlignedObject(Object originalObj) { if (SerialGCOptions.useCompactingOldGen() && GCImpl.getGCImpl().isCompleteCollection()) { /* * In a compacting complete collection, the remembered set bit is set already during - * marking and the first object table is built during planning. + * marking and the first object table is built later during fix-up. */ } else { /* diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java index e0e0d9fe9240..c4b5325f6937 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/ObjectMoveInfo.java @@ -26,8 +26,6 @@ import static com.oracle.svm.core.Uninterruptible.CALLED_FROM_UNINTERRUPTIBLE_CODE; -import com.oracle.svm.core.util.VMError; -import jdk.graal.compiler.word.Word; import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; @@ -37,10 +35,14 @@ import com.oracle.svm.core.genscavenge.AlignedHeapChunk; import com.oracle.svm.core.genscavenge.HeapChunk; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; +import com.oracle.svm.core.genscavenge.remset.AlignedChunkRememberedSet; import com.oracle.svm.core.genscavenge.remset.BrickTable; +import com.oracle.svm.core.genscavenge.remset.FirstObjectTable; import com.oracle.svm.core.hub.LayoutEncoding; +import com.oracle.svm.core.util.VMError; import jdk.graal.compiler.api.replacements.Fold; +import jdk.graal.compiler.word.Word; /** * {@link PlanningVisitor} decides where objects will be moved and uses the methods of this class to @@ -165,21 +167,46 @@ static boolean useCompressedLayout() { * @see AlignedHeapChunk#walkObjects */ @Uninterruptible(reason = CALLED_FROM_UNINTERRUPTIBLE_CODE, mayBeInlined = true) - public static void walkObjects(AlignedHeapChunk.AlignedHeader chunkHeader, ObjectFixupVisitor visitor) { - Pointer p = AlignedHeapChunk.getObjectsStart(chunkHeader); + public static void walkObjectsForFixup(AlignedHeapChunk.AlignedHeader chunk, ObjectFixupVisitor visitor) { + FirstObjectTable.initializeTable(AlignedChunkRememberedSet.getFirstObjectTableStart(chunk), AlignedChunkRememberedSet.getFirstObjectTableSize()); + + Pointer p = AlignedHeapChunk.getObjectsStart(chunk); do { - Pointer nextObjSeq = getNextObjectSeqAddress(p); - Pointer objSeqEnd = p.add(getObjectSeqSize(p)); - assert 
objSeqEnd.belowOrEqual(HeapChunk.getTopPointer(chunkHeader)); + Pointer objSeq = p; + Pointer nextObjSeq = getNextObjectSeqAddress(objSeq); + Pointer objSeqNewAddress = getNewAddress(objSeq); + AlignedHeapChunk.AlignedHeader objSeqNewChunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(objSeqNewAddress); + Pointer objSeqEnd = objSeq.add(getObjectSeqSize(objSeq)); + assert objSeqEnd.belowOrEqual(HeapChunk.getTopPointer(chunk)); while (p.notEqual(objSeqEnd)) { assert p.belowThan(objSeqEnd); Object obj = p.toObject(); UnsignedWord objSize = LayoutEncoding.getSizeFromObjectInlineInGC(obj); + + /* + * Add the object's new location to the first object table of the target chunk. Note + * that we have already encountered that chunk and initialized its table earlier. + * + * Rebuilding the table is also required for swept chunks, where dead objects can + * mean that another object is now the first object in a range. + */ + Pointer newAddress = objSeqNewAddress.add(p.subtract(objSeq)); + UnsignedWord offset = newAddress.subtract(AlignedHeapChunk.getObjectsStart(objSeqNewChunk)); + FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(objSeqNewChunk), offset, offset.add(objSize)); + if (!visitor.visitObjectInline(obj)) { throw VMError.shouldNotReachHereAtRuntime(); } + p = p.add(objSize); } + if (nextObjSeq.isNonNull() && chunk.getShouldSweepInsteadOfCompact()) { + // We will write a filler object here, add the location to the first object table. + assert p.belowThan(nextObjSeq); + UnsignedWord offset = p.subtract(AlignedHeapChunk.getObjectsStart(chunk)); + UnsignedWord size = nextObjSeq.subtract(p); + FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(chunk), offset, offset.add(size)); + } p = nextObjSeq; } while (p.isNonNull()); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java index 64cc8e4c3da9..f7e1a0f6aff0 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/PlanningVisitor.java @@ -34,9 +34,7 @@ import com.oracle.svm.core.genscavenge.HeapChunk; import com.oracle.svm.core.genscavenge.ObjectHeaderImpl; import com.oracle.svm.core.genscavenge.Space; -import com.oracle.svm.core.genscavenge.remset.AlignedChunkRememberedSet; import com.oracle.svm.core.genscavenge.remset.BrickTable; -import com.oracle.svm.core.genscavenge.remset.FirstObjectTable; import com.oracle.svm.core.hub.LayoutEncoding; import jdk.graal.compiler.word.Word; @@ -58,9 +56,6 @@ public PlanningVisitor() { public void init(Space space) { allocChunk = space.getFirstAlignedHeapChunk(); allocPointer = AlignedHeapChunk.getObjectsStart(allocChunk); - if (!allocChunk.getShouldSweepInsteadOfCompact()) { - FirstObjectTable.initializeTable(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), AlignedChunkRememberedSet.getFirstObjectTableSize()); - } } @Override @@ -120,43 +115,12 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { } objSeqSize = objSeqSize.add(objSize); - if (!sweeping) { - if (allocPointer.add(objSeqSize).aboveThan(AlignedHeapChunk.getObjectsEnd(allocChunk))) { - /* Out of space, move to the start of the next chunk. 
*/ - allocChunk = HeapChunk.getNext(allocChunk); - assert allocChunk.isNonNull(); - assert !allocChunk.getShouldSweepInsteadOfCompact(); - allocPointer = AlignedHeapChunk.getObjectsStart(allocChunk); - - /* - * TODO: we should reset the FOT entries we already wrote in the last chunk - * (but they should not be accessed, not even by heap verification) - */ - - /* Visit previous objects in sequence again to write new FOT entries. */ - FirstObjectTable.initializeTable(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), AlignedChunkRememberedSet.getFirstObjectTableSize()); - Pointer q = objSeq; - while (q.notEqual(p)) { - UnsignedWord offset = q.subtract(objSeq); - UnsignedWord size = LayoutEncoding.getSizeFromObjectInlineInGC(q.toObject()); - FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), offset, offset.add(size)); - q = q.add(size); - } - } - - Pointer allocEndOffset = allocPointer.add(objSeqSize).subtract(AlignedHeapChunk.getObjectsStart(allocChunk)); - FirstObjectTable.setTableForObject(AlignedChunkRememberedSet.getFirstObjectTableStart(allocChunk), allocEndOffset.subtract(objSize), allocEndOffset); - } } else { // not marked, i.e. not alive and start of a gap of yet unknown size if (objSeqSize.notEqual(0)) { // end of an object sequence + Pointer newAddress = sweeping ? objSeq : allocate(objSeqSize); + ObjectMoveInfo.setNewAddress(objSeq, newAddress); ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); - if (sweeping) { - ObjectMoveInfo.setNewAddress(objSeq, objSeq); - } else { - ObjectMoveInfo.setNewAddress(objSeq, allocPointer); - allocPointer = allocPointer.add(objSeqSize); // ensured enough memory above - } objSeqSize = Word.zero(); @@ -175,15 +139,10 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { if (gapSize.notEqual(0)) { // truncate gap at chunk end chunk.setTopOffset(chunk.getTopOffset().subtract(gapSize)); - } else if (objSeqSize.notEqual(0)) { + Pointer newAddress = sweeping ? 
objSeq : allocate(objSeqSize); + ObjectMoveInfo.setNewAddress(objSeq, newAddress); ObjectMoveInfo.setObjectSeqSize(objSeq, objSeqSize); - if (sweeping) { - ObjectMoveInfo.setNewAddress(objSeq, objSeq); - } else { - ObjectMoveInfo.setNewAddress(objSeq, allocPointer); - allocPointer = allocPointer.add(objSeqSize); // ensured enough memory above - } } if (sweeping) { @@ -209,4 +168,18 @@ public boolean visitChunk(AlignedHeapChunk.AlignedHeader chunk) { return true; } + + private Pointer allocate(UnsignedWord size) { + Pointer p = allocPointer; + allocPointer = allocPointer.add(size); + if (allocPointer.aboveThan(AlignedHeapChunk.getObjectsEnd(allocChunk))) { + allocChunk = HeapChunk.getNext(allocChunk); + assert allocChunk.isNonNull(); + assert !allocChunk.getShouldSweepInsteadOfCompact(); + + p = AlignedHeapChunk.getObjectsStart(allocChunk); + allocPointer = p.add(size); + } + return p; + } } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java index 8f0c3d18040e..4244e706095e 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/compacting/SweepingVisitor.java @@ -30,6 +30,8 @@ import org.graalvm.word.UnsignedWord; import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.genscavenge.AlignedHeapChunk; +import com.oracle.svm.core.genscavenge.HeapChunk; import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; import com.oracle.svm.core.heap.FillerArray; @@ -62,12 +64,15 @@ static int arrayBaseOffset() { @Override public boolean visit(Pointer objSeq, UnsignedWord size, Pointer newAddress, Pointer nextObjSeq) { + assert objSeq.equal(newAddress); if (nextObjSeq.isNonNull()) { Pointer gapStart = objSeq.add(size); - assert gapStart.belowOrEqual(nextObjSeq); - if (gapStart.notEqual(nextObjSeq)) { - writeFillerObjectAt(gapStart, nextObjSeq.subtract(gapStart)); - } + assert gapStart.belowThan(nextObjSeq); + writeFillerObjectAt(gapStart, nextObjSeq.subtract(gapStart)); + // Note that we have already added first object table entries for fillers during fixup. + } else { + AlignedHeapChunk.AlignedHeader chunk = AlignedHeapChunk.getEnclosingChunkFromObjectPointer(objSeq); + assert objSeq.add(size).equal(HeapChunk.getTopPointer(chunk)); } return true; }
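The heuristic that the first patch disables for the compacting old generation weighs the cumulative pause time of consecutive incremental collections against the padded full-collection pause. A minimal standalone sketch of that comparison follows; the ratio constant and the pause values are placeholders for illustration, not the ones used by AdaptiveCollectionPolicy.

final class PauseRatioHeuristicSketch {
    // stand-in for CONSECUTIVE_MINOR_TO_MAJOR_COLLECTION_PAUSE_TIME_RATIO (illustrative value)
    static final double RATIO = 2.0;

    static boolean shouldCollectCompletely(long minorCountSinceMajor, double avgMinorPauseMs,
                    double paddedAvgMajorPauseMs, boolean useCompactingOldGen) {
        if (useCompactingOldGen) {
            // the change in the first patch: this trigger is skipped entirely for the compacting old gen
            return false;
        }
        return minorCountSinceMajor * avgMinorPauseMs >= RATIO * paddedAvgMajorPauseMs;
    }

    public static void main(String[] args) {
        // With 2 ms minor pauses and a 40 ms padded major pause, the 40th consecutive minor
        // collection (80 ms cumulative) reaches the 2 * 40 ms threshold and a full GC follows.
        System.out.println(shouldCollectCompletely(40, 2.0, 40.0, false)); // true
        System.out.println(shouldCollectCompletely(40, 2.0, 40.0, true));  // false
    }
}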
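The corrected ReciprocalLeastSquareFit comment describes a least-squares fit of y = a + b / x with exponential decay. The following is a minimal sketch of that technique under simplified assumptions, not the SubstrateVM class: substituting u = 1 / x turns the model into a straight line y = a + b * u, so the closed-form linear regression applies, and multiplying the accumulated sums by a decay factor before each new sample gives recent data points a higher weight without keeping a history.

final class DecayingReciprocalFitSketch {
    private final double decay; // e.g. 0.95: every new sample reduces the weight of older ones by 5%
    private double sumW, sumU, sumY, sumUU, sumUY;

    DecayingReciprocalFitSketch(double decay) {
        this.decay = decay;
    }

    void sample(double x, double y) {
        double u = 1.0 / x;
        sumW = sumW * decay + 1.0; // age the existing statistics, then add the new point with weight 1
        sumU = sumU * decay + u;
        sumY = sumY * decay + y;
        sumUU = sumUU * decay + u * u;
        sumUY = sumUY * decay + u * y;
    }

    // valid once at least two samples with different x values have been added
    double b() { // b in y = a + b / x
        return (sumW * sumUY - sumU * sumY) / (sumW * sumUU - sumU * sumU);
    }

    double a() { // a in y = a + b / x
        return (sumY - b() * sumU) / sumW;
    }

    double estimate(double x) {
        return a() + b() / x;
    }
}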
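The BrickTable javadoc describes a per-chunk lookup table over ObjectMoveInfo structures that speeds up translating old object addresses to new ones after planning. Below is a simplified sketch of that lookup under assumed semantics; the brick size, field names, and the plain object chain are inventions for illustration, whereas the real table borrows the card table's memory and stores compact offsets.

final class BrickTableSketch {
    static final int BRICK_BYTES = 512; // assumed brick granularity, for illustration only

    // simplified stand-in for ObjectMoveInfo: one live object sequence and where it will be moved
    record MoveInfo(long seqStart, long seqSize, long newAddress, MoveInfo next) {
    }

    /** bricks[i] holds the move info covering the start of brick i (it may start in an earlier brick). */
    static long translate(MoveInfo[] bricks, long chunkObjectsStart, long oldAddress) {
        int brick = (int) ((oldAddress - chunkObjectsStart) / BRICK_BYTES);
        MoveInfo info = bricks[brick];
        while (info != null && oldAddress >= info.seqStart() + info.seqSize()) {
            info = info.next(); // scan forward from the brick entry instead of from the chunk start
        }
        if (info == null || oldAddress < info.seqStart()) {
            throw new IllegalArgumentException("address is not within a live object sequence");
        }
        return info.newAddress() + (oldAddress - info.seqStart());
    }
}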
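Patches 3 and 4 end up populating the first object table during the fix-up pass: walkObjectsForFixup derives each live object's new address from its sequence's planned new address and records it against the chunk the object will land in. A standalone sketch of that arithmetic follows, using plain long offsets and a simplified int-entry table in place of Pointer/UnsignedWord values and the byte-encoded FirstObjectTable.

final class FixupFirstObjectTableSketch {
    static final int ENTRY_BYTES = 512; // assumed bytes covered per table entry (same granularity as a card)

    /** table[i] = chunk offset of the first object that crosses onto range i, or -1 if none. */
    static void setTableForObject(int[] table, long startOffset, long endOffset) {
        int first = (int) (startOffset / ENTRY_BYTES);
        int last = (int) ((endOffset - 1) / ENTRY_BYTES);
        for (int i = first; i <= last; i++) {
            if (table[i] < 0) {
                table[i] = (int) startOffset; // objects are registered in address order, the first one wins
            }
        }
    }

    /**
     * Walk one object sequence that planning decided to move to newAddress inside the target
     * chunk and record every object's new location in that chunk's table. objectSizes holds the
     * live objects' sizes in visiting order.
     */
    static void registerSequence(int[] targetChunkTable, long targetChunkObjectsStart,
                    long objSeq, long newAddress, long[] objectSizes) {
        long p = objSeq;
        for (long objSize : objectSizes) {
            long objNewAddress = newAddress + (p - objSeq);        // same step as in walkObjectsForFixup
            long offset = objNewAddress - targetChunkObjectsStart; // offset within the target chunk
            setTableForObject(targetChunkTable, offset, offset + objSize);
            p += objSize;
        }
    }
}

When a chunk is swept in place rather than compacted, the planned new address equals the sequence's current address, so the same arithmetic simply yields each object's existing offset, which is why swept chunks go through the identical pass.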