Skip to content

Commit

Permalink
Runtime compressed refs work
Browse the repository at this point in the history
Add instance APIs to adjust the slot address in SlotObject.

Signed-off-by: Graham Chapman <[email protected]>
  • Loading branch information
gacholio committed May 19, 2020
1 parent 56abd35 commit 1da61d8
Show file tree
Hide file tree
Showing 2 changed files with 26 additions and 5 deletions.
20 changes: 20 additions & 0 deletions gc/base/SlotObject.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,26 @@ class GC_SlotObject
_slot = slot;
}

/**
 * Advance the slot address by an integer offset.
 *
 * The stored slot pointer is read, advanced, and written back; the
 * adjustment accounts for whether object references are compressed.
 * NOTE(review): offset appears to be in slot-width units (callers step
 * by 1 per element) — confirm against the static overload.
 *
 * @param[in] offset the offset to add
 */
MMINLINE void addToSlotAddress(intptr_t offset)
{
	/* Resolve the reference-compression mode once, then delegate to the
	 * static address-arithmetic helper. */
	const bool compressed = compressObjectReferences();
	writeAddressToSlot(addToSlotAddress(readAddressFromSlot(), offset, compressed));
}

/**
 * Back up the slot address by an integer offset.
 *
 * The stored slot pointer is read, moved backward, and written back; the
 * adjustment accounts for whether object references are compressed.
 * NOTE(review): offset appears to be in slot-width units, mirroring
 * addToSlotAddress — confirm against the static overload.
 *
 * @param[in] offset the offset to subtract
 */
MMINLINE void subtractFromSlotAddress(intptr_t offset)
{
	/* Resolve the reference-compression mode once, then delegate to the
	 * static address-arithmetic helper. */
	const bool compressed = compressObjectReferences();
	writeAddressToSlot(subtractFromSlotAddress(readAddressFromSlot(), offset, compressed));
}

GC_SlotObject(OMR_VM *omrVM, volatile fomrobject_t* slot)
: _slot(slot)
#if defined (OMR_GC_COMPRESSED_POINTERS)
Expand Down
11 changes: 6 additions & 5 deletions gc/base/segregated/MemoryPoolSegregated.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
fomrobject_t *arrayoidPtr = _extensions->indexableObjectModel.getArrayoidPointer(spine);
Assert_MM_true(totalBytes >= spineBytes);
uintptr_t bytesRemaining = totalBytes - spineBytes;
bool const compressed = compressObjectReferences();
GC_SlotObject arrayletSlotObject(env->getOmrVM(), arrayoidPtr);
for (uintptr_t i=0; i<numberArraylets; i++) {
uintptr_t* arraylet = NULL;
if (0 < bytesRemaining) {
Expand All @@ -148,16 +148,17 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
/* allocation failed; release all storage include spine. */
env->getAllocationContext()->flush(env);

GC_SlotObject backoutSlotObject(env->getOmrVM(), arrayoidPtr);
for (uintptr_t j=0; j<i; j++) {
GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, j, compressed));
arraylet = (uintptr_t*)slotObject.readReferenceFromSlot();
arraylet = (uintptr_t*)backoutSlotObject.readReferenceFromSlot();

MM_HeapRegionDescriptorSegregated *region = (MM_HeapRegionDescriptorSegregated *)regionManager->tableDescriptorForAddress(arraylet);
region->clearArraylet(region->whichArraylet(arraylet, arrayletLeafLogSize));
/* Arraylet backout means arraylets may be re-used before the next cycle, so we need to correct for
* their un-allocation
*/
region->addBytesFreedToArrayletBackout(env);
backoutSlotObject.addToSlotAddress(1);
}
MM_HeapRegionDescriptorSegregated *region = (MM_HeapRegionDescriptorSegregated *)regionManager->tableDescriptorForAddress((uintptr_t *)spine);
if (region->isSmall()) {
Expand All @@ -181,8 +182,8 @@ MM_MemoryPoolSegregated::allocateChunkedArray(MM_EnvironmentBase *env, MM_Alloca
*/
Assert_MM_true(i == numberArraylets - 1);
}
GC_SlotObject slotObject(env->getOmrVM(), GC_SlotObject::addToSlotAddress(arrayoidPtr, i, compressed));
slotObject.writeReferenceToSlot((omrobjectptr_t)arraylet);
arrayletSlotObject.writeReferenceToSlot((omrobjectptr_t)arraylet);
arrayletSlotObject.addToSlotAddress(1);
bytesRemaining = MM_Math::saturatingSubtract(bytesRemaining, arrayletLeafSize);
}
}
Expand Down

0 comments on commit 1da61d8

Please sign in to comment.