src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
Print this page
*** 690,699 ****
--- 690,703 ----
// Largest allocation (in bytes) that can be satisfied without a GC:
// the CMS free-list space's maximum contiguous free chunk, converted
// from heap words to bytes.
size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}
+ // Snapshot accessor: returns the last value recorded by
+ // recalculate_used_stable() on the CMS space, so callers (e.g. memory
+ // pool / heap usage reporting) can read occupancy without racing the
+ // concurrent free-list mutation. Delegates to the space's used_stable().
+ size_t ConcurrentMarkSweepGeneration::used_stable() const {
+ return cmsSpace()->used_stable();
+ }
+
// Upper bound on bytes this generation could make available:
// bytes currently free plus virtual space reserved but not yet committed.
size_t ConcurrentMarkSweepGeneration::max_available() const {
return free() + _virtual_space.uncommitted_size();
}
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
*** 1521,1530 ****
--- 1525,1536 ----
// Recompute sizing policy after a CMS cycle: resize metaspace, then the
// CMS generation's free-list space. Requires Heap_lock (or safepoint);
// FreelistLocker serializes against concurrent free-list updates.
void CMSCollector::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
FreelistLocker z(this);
MetaspaceGC::compute_new_size();
_cmsGen->compute_new_size_free_list();
+ // recalculate CMS used space after CMS collection
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
*** 1743,1752 ****
--- 1749,1759 ----
heap->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = heap->total_full_collections();
}
size_t prev_used = _cmsGen->used();
+ _cmsGen->cmsSpace()->recalculate_used_stable();
// The change of the collection state is normally done at this level;
// the exceptions are phases that are executed while the world is
// stopped. For those phases the change of state is done while the
// world is stopped. For baton passing purposes this allows the
*** 2049,2058 ****
--- 2056,2066 ----
void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
_capacity_at_prologue = capacity();
_used_at_prologue = used();
+ _cmsSpace->recalculate_used_stable();
// We enable promotion tracking so that card-scanning can recognize
// which objects have been promoted during this GC and skip them.
for (uint i = 0; i < ParallelGCThreads; i++) {
_par_gc_thread_states[i]->promo.startTrackingPromotions();
*** 2121,2130 ****
--- 2129,2139 ----
}
// reset _eden_chunk_array so sampling starts afresh
_eden_chunk_index = 0;
size_t cms_used = _cmsGen->cmsSpace()->used();
+ _cmsGen->cmsSpace()->recalculate_used_stable();
// update performance counters - this uses a special version of
// update_counters() that allows the utilization to be passed as a
// parameter, avoiding multiple calls to used().
//
*** 2814,2823 ****
--- 2823,2834 ----
checkpointRootsInitialWork();
// enable ("weak") refs discovery
rp->enable_discovery();
_collectorState = Marking;
}
+
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
void CMSCollector::checkpointRootsInitialWork() {
assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
assert(_collectorState == InitialMarking, "just checking");
*** 4175,4184 ****
--- 4186,4196 ----
}
FreelistLocker x(this);
MutexLocker y(bitMapLock(),
Mutex::_no_safepoint_check_flag);
checkpointRootsFinalWork();
+ _cmsGen->cmsSpace()->recalculate_used_stable();
}
verify_work_stacks_empty();
verify_overflow_empty();
}
*** 5335,5347 ****
--- 5347,5364 ----
// We need all the free list locks to make the abstract state
// transition from Sweeping to Resetting. See detailed note
// further below.
{
CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
+
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
+
+ // recalculate CMS used space after CMS collection
+ _cmsGen->cmsSpace()->recalculate_used_stable();
+
_collectorState = Resizing;
}
}
verify_work_stacks_empty();
verify_overflow_empty();
*** 5426,5435 ****
--- 5443,5453 ----
bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
if (!full && current_is_young) {
// Gather statistics on the young generation collection.
collector()->stats().record_gc0_end(used());
}
+ _cmsSpace->recalculate_used_stable();
}
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
// We iterate over the space(s) underlying this generation,
// checking the mark bit map to see if the bits corresponding