hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
rev 611 : Merge
*** 191,201 ****
// Verify the assumption that FreeChunk::_prev and OopDesc::_klass
// offsets match. The ability to tell free chunks from objects
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
! assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
)
if (ParallelGCThreads > 0) {
typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
--- 191,202 ----
// Verify the assumption that FreeChunk::_prev and OopDesc::_klass
// offsets match. The ability to tell free chunks from objects
// depends on this property.
debug_only(
FreeChunk* junk = NULL;
! assert(UseCompressedOops ||
! junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
"Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc");
)
if (ParallelGCThreads > 0) {
typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
*** 226,235 ****
--- 227,264 ----
// note that all arithmetic is in units of HeapWords.
assert(MinChunkSize >= oopDesc::header_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert");
}
+
+ // The field "_initiating_occupancy" represents the occupancy percentage
+ // at which we trigger a new collection cycle. Unless explicitly specified
+ // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
+ // is calculated by:
+ //
+ // Let "f" be MinHeapFreeRatio in
+ //
+ // _initiating_occupancy = 100-f +
+ // f * (CMSTrigger[Perm]Ratio/100)
+ // where CMSTrigger[Perm]Ratio is the argument "tr" below.
+ //
+ // That is, if we assume the heap is at its desired maximum occupancy at the
+ // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
+ // space be allocated before initiating a new collection cycle.
+ //
+ void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
+ assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
+ if (io >= 0) {
+ _initiating_occupancy = (double)io / 100.0;
+ } else {
+ _initiating_occupancy = ((100 - MinHeapFreeRatio) +
+ (double)(tr * MinHeapFreeRatio) / 100.0)
+ / 100.0;
+ }
+ }
+
+
void ConcurrentMarkSweepGeneration::ref_processor_init() {
assert(collector() != NULL, "no collector");
collector()->ref_processor_init();
}
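A quick worked example of the formula above, assuming the usual defaults MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 (both assumptions about this build's flag defaults) and no explicit CMSInitiatingOccupancyFraction (io < 0):

    _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
                          = (60 + 32) / 100.0
                          = 0.92

i.e. under those defaults a concurrent cycle is considered once the generation is roughly 92% full.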
*** 510,519 ****
--- 539,549 ----
_survivor_plab_array(NULL), // -- ditto --
_survivor_chunk_array(NULL), // -- ditto --
_survivor_chunk_capacity(0), // -- ditto --
_survivor_chunk_index(0), // -- ditto --
_ser_pmc_preclean_ovflw(0),
+ _ser_kac_preclean_ovflw(0),
_ser_pmc_remark_ovflw(0),
_par_pmc_remark_ovflw(0),
_ser_kac_ovflw(0),
_par_kac_ovflw(0),
#ifndef PRODUCT
*** 524,535 ****
_icms_start_limit(NULL),
_icms_stop_limit(NULL),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_completed_initialization(false),
_collector_policy(cp),
! _unload_classes(false),
! _unloaded_classes_last_cycle(false),
_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true;
}
--- 554,565 ----
_icms_start_limit(NULL),
_icms_stop_limit(NULL),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_completed_initialization(false),
_collector_policy(cp),
! _should_unload_classes(false),
! _concurrent_cycles_since_last_unload(0),
_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true;
}
*** 641,670 ****
_hash_seed[i] = 17; // copied from ParNew
}
}
}
! // "initiatingOccupancy" is the occupancy ratio at which we trigger
! // a new collection cycle. Unless explicitly specified via
! // CMSTriggerRatio, it is calculated by:
! // Let "f" be MinHeapFreeRatio in
! //
! // initiatingOccupancy = 100-f +
! // f * (CMSTriggerRatio/100)
! // That is, if we assume the heap is at its desired maximum occupancy at the
! // end of a collection, we let CMSTriggerRatio of the (purported) free
! // space be allocated before initiating a new collection cycle.
! if (CMSInitiatingOccupancyFraction > 0) {
! _initiatingOccupancy = (double)CMSInitiatingOccupancyFraction / 100.0;
! } else {
! _initiatingOccupancy = ((100 - MinHeapFreeRatio) +
! (double)(CMSTriggerRatio *
! MinHeapFreeRatio) / 100.0)
! / 100.0;
! }
// Clip CMSBootstrapOccupancy between 0 and 100.
! _bootstrap_occupancy = ((double)MIN2((intx)100, MAX2((intx)0, CMSBootstrapOccupancy)))
/(double)100;
_full_gcs_since_conc_gc = 0;
// Now tell CMS generations the identity of their collector
--- 671,685 ----
_hash_seed[i] = 17; // copied from ParNew
}
}
}
! _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
! _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
!
// Clip CMSBootstrapOccupancy between 0 and 100.
! _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
/(double)100;
_full_gcs_since_conc_gc = 0;
// Now tell CMS generations the identity of their collector
*** 1027,1037 ****
_markBitMap.mark(start + 1); // object is potentially uninitialized?
_markBitMap.mark(start + size - 1);
// mark end of object
}
// check that oop looks uninitialized
! assert(oop(start)->klass() == NULL, "_klass should be NULL");
}
void CMSCollector::promoted(bool par, HeapWord* start,
bool is_obj_array, size_t obj_size) {
assert(_markBitMap.covers(start), "Out of bounds");
--- 1042,1052 ----
_markBitMap.mark(start + 1); // object is potentially uninitialized?
_markBitMap.mark(start + size - 1);
// mark end of object
}
// check that oop looks uninitialized
! assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
void CMSCollector::promoted(bool par, HeapWord* start,
bool is_obj_array, size_t obj_size) {
assert(_markBitMap.covers(start), "Out of bounds");
*** 1212,1222 ****
}
return NULL;
}
! oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
// delegate to underlying space.
assert_lock_strong(freelistLock());
--- 1227,1237 ----
}
return NULL;
}
! oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
// delegate to underlying space.
assert_lock_strong(freelistLock());
*** 1224,1245 ****
if (Universe::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
! oop res = _cmsSpace->promote(obj, obj_size, ref);
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
expand(s*HeapWordSize, MinHeapDeltaBytes,
CMSExpansionCause::_satisfy_promotion);
// Since there's currently no next generation, we don't try to promote
// into a more senior generation.
assert(next_gen() == NULL, "assumption, based upon which no attempt "
"is made to pass on a possibly failing "
"promotion to next generation");
! res = _cmsSpace->promote(obj, obj_size, ref);
}
if (res != NULL) {
// See comment in allocate() about when objects should
// be allocated live.
assert(obj->is_oop(), "Will dereference klass pointer below");
--- 1239,1260 ----
if (Universe::heap()->promotion_should_fail()) {
return NULL;
}
#endif // #ifndef PRODUCT
! oop res = _cmsSpace->promote(obj, obj_size);
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
expand(s*HeapWordSize, MinHeapDeltaBytes,
CMSExpansionCause::_satisfy_promotion);
// Since there's currently no next generation, we don't try to promote
// into a more senior generation.
assert(next_gen() == NULL, "assumption, based upon which no attempt "
"is made to pass on a possibly failing "
"promotion to next generation");
! res = _cmsSpace->promote(obj, obj_size);
}
if (res != NULL) {
// See comment in allocate() about when objects should
// be allocated live.
assert(obj->is_oop(), "Will dereference klass pointer below");
*** 1297,1325 ****
if (obj_ptr == NULL) {
return NULL;
}
}
oop obj = oop(obj_ptr);
! assert(obj->klass() == NULL, "Object should be uninitialized here.");
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
HeapWord* old_ptr = (HeapWord*)old;
if (word_sz > (size_t)oopDesc::header_size()) {
Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
obj_ptr + oopDesc::header_size(),
word_sz - oopDesc::header_size());
}
// Restore the mark word copied above.
obj->set_mark(m);
// Now we can track the promoted object, if necessary. We take care
// to delay the transition from uninitialized to full object
// (i.e., insertion of klass pointer) until after, so that it
// atomically becomes a promoted object.
if (promoInfo->tracking()) {
promoInfo->track((PromotedObject*)obj, old->klass());
}
! // Finally, install the klass pointer.
obj->set_klass(old->klass());
assert(old->is_oop(), "Will dereference klass ptr below");
collector()->promoted(true, // parallel
obj_ptr, old->is_objArray(), word_sz);
--- 1312,1349 ----
if (obj_ptr == NULL) {
return NULL;
}
}
oop obj = oop(obj_ptr);
! assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
+ // Except with compressed oops, where it's the mark word.
HeapWord* old_ptr = (HeapWord*)old;
if (word_sz > (size_t)oopDesc::header_size()) {
Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
obj_ptr + oopDesc::header_size(),
word_sz - oopDesc::header_size());
}
+
+ if (UseCompressedOops) {
+ // Copy gap missed by (aligned) header size calculation above
+ obj->set_klass_gap(old->klass_gap());
+ }
+
// Restore the mark word copied above.
obj->set_mark(m);
+
// Now we can track the promoted object, if necessary. We take care
// To delay the transition from uninitialized to full object
// (i.e., insertion of klass pointer) until after, so that it
// atomically becomes a promoted object.
if (promoInfo->tracking()) {
promoInfo->track((PromotedObject*)obj, old->klass());
}
!
! // Finally, install the klass pointer (this should be volatile).
obj->set_klass(old->klass());
assert(old->is_oop(), "Will dereference klass ptr below");
collector()->promoted(true, // parallel
obj_ptr, old->is_objArray(), word_sz);
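For context on the set_klass_gap() call above, a sketch of the assumed 64-bit object header layout under compressed oops (the offsets are an assumption, not part of this change):

    // offset  0: markOop   _mark              (8 bytes)
    // offset  8: narrowOop _compressed_klass  (4 bytes)
    // offset 12: "klass gap" -- may already hold the first 32-bit instance field
    //
    // oopDesc::header_size() still spans 2 HeapWords (16 bytes), so the aligned
    // word copy above starts at offset 16 and would skip a field stored in the
    // gap; set_klass_gap(old->klass_gap()) carries it over explicitly.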
*** 1412,1422 ****
gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
_cmsGen->contiguous_available());
gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
! gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", initiatingOccupancy());
}
// ------------------------------------------------------------------
// If the estimated time to complete a cms collection (cms_duration())
// is less than the estimated time remaining until the cms generation
--- 1436,1447 ----
gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
_cmsGen->contiguous_available());
gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
! gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
! gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
}
// ------------------------------------------------------------------
// If the estimated time to complete a cms collection (cms_duration())
// is less than the estimated time remaining until the cms generation
*** 1445,1505 ****
// Otherwise, we start a collection cycle if either the perm gen or
// old gen want a collection cycle started. Each may use
// an appropriate criterion for making this decision.
// XXX We need to make sure that the gen expansion
! // criterion dovetails well with this.
! if (_cmsGen->shouldConcurrentCollect(initiatingOccupancy())) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print_cr("CMS old gen initiated");
}
return true;
}
! if (cms_should_unload_classes() &&
! _permGen->shouldConcurrentCollect(initiatingOccupancy())) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print_cr("CMS perm gen initiated");
}
return true;
}
!
return false;
}
// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
_cmsGen->clear_expansion_cause();
_permGen->clear_expansion_cause();
}
! bool ConcurrentMarkSweepGeneration::shouldConcurrentCollect(
! double initiatingOccupancy) {
! // We should be conservative in starting a collection cycle. To
! // start too eagerly runs the risk of collecting too often in the
! // extreme. To collect too rarely falls back on full collections,
! // which works, even if not optimum in terms of concurrent work.
! // As a workaround for collecting too eagerly, use the flag
! // UseCMSInitiatingOccupancyOnly. This also has the advantage of
! // giving the user an easily understandable way of controlling the
! // collections.
! // We want to start a new collection cycle if any of the following
! // conditions hold:
! // . our current occupancy exceeds the initiating occupancy, or
! // . we recently needed to expand and have not since that expansion,
! // collected, or
! // . we are not using adaptive free lists and linear allocation is
! // going to fail, or
! // . (for old gen) incremental collection has already failed or
! // may soon fail in the near future as we may not be able to absorb
! // promotions.
! assert_lock_strong(freelistLock());
! if (occupancy() > initiatingOccupancy) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
! short_name(), occupancy(), initiatingOccupancy);
}
return true;
}
if (UseCMSInitiatingOccupancyOnly) {
return false;
--- 1470,1548 ----
// Otherwise, we start a collection cycle if either the perm gen or
// old gen want a collection cycle started. Each may use
// an appropriate criterion for making this decision.
// XXX We need to make sure that the gen expansion
! // criterion dovetails well with this. XXX NEED TO FIX THIS
! if (_cmsGen->should_concurrent_collect()) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print_cr("CMS old gen initiated");
}
return true;
}
! // We start a collection if we believe an incremental collection may fail;
! // this is not likely to be productive in practice because it's probably too
! // late anyway.
! GenCollectedHeap* gch = GenCollectedHeap::heap();
! assert(gch->collector_policy()->is_two_generation_policy(),
! "You may want to check the correctness of the following");
! if (gch->incremental_collection_will_fail()) {
! if (PrintGCDetails && Verbose) {
! gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
! }
! return true;
! }
!
! if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
! bool res = update_should_unload_classes();
! if (res) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print_cr("CMS perm gen initiated");
}
return true;
}
! }
return false;
}
// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
_cmsGen->clear_expansion_cause();
_permGen->clear_expansion_cause();
}
! // We should be conservative in starting a collection cycle. To
! // start too eagerly runs the risk of collecting too often in the
! // extreme. To collect too rarely falls back on full collections,
! // which works, even if not optimum in terms of concurrent work.
! // As a workaround for collecting too eagerly, use the flag
! // UseCMSInitiatingOccupancyOnly. This also has the advantage of
! // giving the user an easily understandable way of controlling the
! // collections.
! // We want to start a new collection cycle if any of the following
! // conditions hold:
! // . our current occupancy exceeds the configured initiating occupancy
! // for this generation, or
! // . we recently needed to expand this space and have not, since that
! // expansion, done a collection of this generation, or
! // . the underlying space believes that it may be a good idea to initiate
! // a concurrent collection (this may be based on criteria such as the
! // following: the space uses linear allocation and linear allocation is
! // going to fail, or there is believed to be excessive fragmentation in
! // the generation, etc... or ...
! // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
! // the case of the old generation, not the perm generation; see CR 6543076):
! // we may be approaching a point at which allocation requests may fail because
! // we will be out of sufficient free space given allocation rate estimates.]
! bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
! assert_lock_strong(freelistLock());
! if (occupancy() > initiating_occupancy()) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
! short_name(), occupancy(), initiating_occupancy());
}
return true;
}
if (UseCMSInitiatingOccupancyOnly) {
return false;
*** 1509,1532 ****
gclog_or_tty->print(" %s: collect because expanded for allocation ",
short_name());
}
return true;
}
! GenCollectedHeap* gch = GenCollectedHeap::heap();
! assert(gch->collector_policy()->is_two_generation_policy(),
! "You may want to check the correctness of the following");
! if (gch->incremental_collection_will_fail()) {
if (PrintGCDetails && Verbose) {
! gclog_or_tty->print(" %s: collect because incremental collection will fail ",
! short_name());
! }
! return true;
! }
! if (!_cmsSpace->adaptive_freelists() &&
! _cmsSpace->linearAllocationWouldFail()) {
! if (PrintGCDetails && Verbose) {
! gclog_or_tty->print(" %s: collect because of linAB ",
short_name());
}
return true;
}
return false;
--- 1552,1564 ----
gclog_or_tty->print(" %s: collect because expanded for allocation ",
short_name());
}
return true;
}
! if (_cmsSpace->should_concurrent_collect()) {
if (PrintGCDetails && Verbose) {
! gclog_or_tty->print(" %s: collect because cmsSpace says so ",
short_name());
}
return true;
}
return false;
*** 1930,1939 ****
--- 1962,1972 ----
// Temporarily make refs discovery atomic
ReferenceProcessorAtomicMutator w(ref_processor(), true);
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery();
+ ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
// collection, clear the _modUnionTable.
assert(_collectorState != Idling || _modUnionTable.isAllClear(),
"_modUnionTable should be clear if the baton was not passed");
*** 1969,1980 ****
_collectorState = Resetting;
assert(_restart_addr == NULL,
"Should have been NULL'd before baton was passed");
reset(false /* == !asynch */);
_cmsGen->reset_after_compaction();
! if (verifying() && !cms_should_unload_classes()) {
perm_gen_verify_bit_map()->clear_all();
}
// Clear any data recorded in the PLAB chunk arrays.
if (_survivor_plab_array != NULL) {
--- 2002,2014 ----
_collectorState = Resetting;
assert(_restart_addr == NULL,
"Should have been NULL'd before baton was passed");
reset(false /* == !asynch */);
_cmsGen->reset_after_compaction();
+ _concurrent_cycles_since_last_unload = 0;
! if (verifying() && !should_unload_classes()) {
perm_gen_verify_bit_map()->clear_all();
}
// Clear any data recorded in the PLAB chunk arrays.
if (_survivor_plab_array != NULL) {
*** 2097,2106 ****
--- 2131,2141 ----
GenCollectedHeap* gch = GenCollectedHeap::heap();
{
bool safepoint_check = Mutex::_no_safepoint_check_flag;
MutexLockerEx hl(Heap_lock, safepoint_check);
+ FreelistLocker fll(this);
MutexLockerEx x(CGC_lock, safepoint_check);
if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
// The foreground collector is active or we're
// not using asynchronous collections. Skip this
// background collection.
*** 2111,2127 ****
_collectorState = InitialMarking;
// Reset the expansion cause, now that we are about to begin
// a new cycle.
clear_expansion_cause();
}
! _unloaded_classes_last_cycle = cms_should_unload_classes(); // ... from last cycle
! // This controls class unloading in response to an explicit gc request.
! // If ExplicitGCInvokesConcurrentAndUnloadsClasses is set, then
! // we will unload classes even if CMSClassUnloadingEnabled is not set.
! // See CR 6541037 and related CRs.
! _unload_classes = _full_gc_requested // ... for this cycle
! && ExplicitGCInvokesConcurrentAndUnloadsClasses;
_full_gc_requested = false; // acks all outstanding full gc requests
// Signal that we are about to start a collection
gch->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = gch->total_full_collections();
}
--- 2146,2158 ----
_collectorState = InitialMarking;
// Reset the expansion cause, now that we are about to begin
// a new cycle.
clear_expansion_cause();
}
! // Decide if we want to enable class unloading as part of the
! // ensuing concurrent GC cycle.
! update_should_unload_classes();
_full_gc_requested = false; // acks all outstanding full gc requests
// Signal that we are about to start a collection
gch->increment_total_full_collections(); // ... starting a collection cycle
_collection_count_start = gch->total_full_collections();
}
*** 2355,2364 ****
--- 2386,2398 ----
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
Universe::verify(true);
}
+ // Snapshot the soft reference policy to be used in this collection cycle.
+ ref_processor()->setup_policy(clear_all_soft_refs);
+
bool init_mark_was_synchronous = false; // until proven otherwise
while (_collectorState != Idling) {
if (TraceCMSState) {
gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
Thread::current(), _collectorState);
*** 2733,2749 ****
bool _failed;
public:
VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
! void do_bit(size_t offset) {
HeapWord* addr = _marks->offsetToHeapWord(offset);
if (!_marks->isMarked(addr)) {
oop(addr)->print();
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
_failed = true;
}
}
bool failed() { return _failed; }
};
--- 2767,2784 ----
bool _failed;
public:
VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
! bool do_bit(size_t offset) {
HeapWord* addr = _marks->offsetToHeapWord(offset);
if (!_marks->isMarked(addr)) {
oop(addr)->print();
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
_failed = true;
}
+ return true;
}
bool failed() { return _failed; }
};
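The change of do_bit() from void to bool presumably tracks a BitMap iteration protocol in which the closure's return value decides whether the scan continues (returning true, as above, means "keep going"). A minimal sketch of such a driver under that assumption (the real BitMap::iterate scans word-at-a-time and its exact signature may differ):

    bool BitMap::iterate(BitMapClosure* blk, idx_t left, idx_t right) {
      for (idx_t i = left; i < right; i++) {
        if (at(i) && !blk->do_bit(i)) {
          return false;  // closure requested early termination
        }
      }
      return true;       // scanned the whole range
    }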
*** 3046,3070 ****
assert(overflow_list_is_empty(), "Overflow list should be empty");
assert(no_preserved_marks(), "No preserved marks");
}
#endif // PRODUCT
void CMSCollector::setup_cms_unloading_and_verification_state() {
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|| VerifyBeforeExit;
const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
| SharedHeap::SO_CodeCache;
! if (cms_should_unload_classes()) { // Should unload classes this cycle
remove_root_scanning_option(rso); // Shrink the root set appropriately
set_verifying(should_verify); // Set verification state for this cycle
return; // Nothing else needs to be done at this time
}
// Not unloading classes this cycle
! assert(!cms_should_unload_classes(), "Inconsistency!");
! if ((!verifying() || cms_unloaded_classes_last_cycle()) && should_verify) {
// We were not verifying, or we _were_ unloading classes in the last cycle,
// AND some verification options are enabled this cycle; in this case,
// we must make sure that the deadness map is allocated if not already so,
// and cleared (if already allocated previously --
// CMSBitMap::sizeInBits() is used to determine if it's allocated).
--- 3081,3146 ----
assert(overflow_list_is_empty(), "Overflow list should be empty");
assert(no_preserved_marks(), "No preserved marks");
}
#endif // PRODUCT
+ // Decide if we want to enable class unloading as part of the
+ // ensuing concurrent GC cycle. We will collect the perm gen and
+ // unload classes if it's the case that:
+ // (1) an explicit gc request has been made and the flag
+ // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
+ // (2) (a) class unloading is enabled at the command line, and
+ // (b) (i) perm gen threshold has been crossed, or
+ // (ii) old gen is getting really full, or
+ // (iii) the previous N CMS collections did not collect the
+ // perm gen
+ // NOTE: Provided there is no change in the state of the heap between
+ // calls to this method, it should have idempotent results. Moreover,
+ // its results should be monotonically increasing (i.e. going from 0 to 1,
+ // but not 1 to 0) between successive calls between which the heap was
+ // not collected. For the implementation below, it must thus rely on
+ // the property that concurrent_cycles_since_last_unload()
+ // will not decrease unless a collection cycle happened and that
+ // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
+ // themselves also monotonic in that sense. See check_monotonicity()
+ // below.
+ bool CMSCollector::update_should_unload_classes() {
+ _should_unload_classes = false;
+ // Condition 1 above
+ if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
+ _should_unload_classes = true;
+ } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
+ // Disjuncts 2.b.(i,ii,iii) above
+ _should_unload_classes = (concurrent_cycles_since_last_unload() >=
+ CMSClassUnloadingMaxInterval)
+ || _permGen->should_concurrent_collect()
+ || _cmsGen->is_too_full();
+ }
+ return _should_unload_classes;
+ }
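To illustrate how the disjuncts in update_should_unload_classes() interact with the counter reset in sweepWork() further below, a hypothetical trace (the interval value is chosen purely for illustration), assuming -XX:+CMSClassUnloadingEnabled, -XX:CMSClassUnloadingMaxInterval=5, and a perm gen and old gen that never trip their own conditions:

    // concurrent cycle:            1    2    3    4    5    6    7
    // count at start of cycle:     0    1    2    3    4    5    0
    // unload classes this cycle:   no   no   no   no   no   yes  no
    //                                                       ^ counter reset to 0 by sweepWork()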
+
+ bool ConcurrentMarkSweepGeneration::is_too_full() const {
+ bool res = should_concurrent_collect();
+ res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
+ return res;
+ }
+
void CMSCollector::setup_cms_unloading_and_verification_state() {
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|| VerifyBeforeExit;
const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings
| SharedHeap::SO_CodeCache;
! if (should_unload_classes()) { // Should unload classes this cycle
remove_root_scanning_option(rso); // Shrink the root set appropriately
set_verifying(should_verify); // Set verification state for this cycle
return; // Nothing else needs to be done at this time
}
// Not unloading classes this cycle
! assert(!should_unload_classes(), "Inconsistency!");
! if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
// We were not verifying, or we _were_ unloading classes in the last cycle,
// AND some verification options are enabled this cycle; in this case,
// we must make sure that the deadness map is allocated if not already so,
// and cleared (if already allocated previously --
// CMSBitMap::sizeInBits() is used to determine if it's allocated).
*** 3126,3160 ****
}
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause)
{
- assert_locked_or_safepoint(Heap_lock);
! size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
! size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
! bool success = false;
! if (aligned_expand_bytes > aligned_bytes) {
! success = grow_by(aligned_expand_bytes);
! }
! if (!success) {
! success = grow_by(aligned_bytes);
! }
! if (!success) {
! size_t remaining_bytes = _virtual_space.uncommitted_size();
! if (remaining_bytes > 0) {
! success = grow_by(remaining_bytes);
! }
! }
! if (GC_locker::is_active()) {
! if (PrintGC && Verbose) {
! gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
! }
! }
// remember why we expanded; this information is used
// by shouldConcurrentCollect() when making decisions on whether to start
// a new CMS cycle.
if (success) {
set_expansion_cause(cause);
--- 3202,3221 ----
}
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
+ bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
+ return CardGeneration::expand(bytes, expand_bytes);
+ }
+
void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause)
{
! bool success = expand(bytes, expand_bytes);
!
// remember why we expanded; this information is used
// by shouldConcurrentCollect() when making decisions on whether to start
// a new CMS cycle.
if (success) {
set_expansion_cause(cause);
*** 3777,3787 ****
}
assert(work_q->size() == 0, "Shouldn't steal");
MutexLockerEx ml(ovflw_stk->par_lock(),
Mutex::_no_safepoint_check_flag);
// Grab up to 1/4 the size of the work queue
! size_t num = MIN2((size_t)work_q->max_elems()/4,
(size_t)ParGCDesiredObjsFromOverflowList);
num = MIN2(num, ovflw_stk->length());
for (int i = (int) num; i > 0; i--) {
oop cur = ovflw_stk->pop();
assert(cur != NULL, "Counted wrong?");
--- 3838,3848 ----
}
assert(work_q->size() == 0, "Shouldn't steal");
MutexLockerEx ml(ovflw_stk->par_lock(),
Mutex::_no_safepoint_check_flag);
// Grab up to 1/4 the size of the work queue
! size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
num = MIN2(num, ovflw_stk->length());
for (int i = (int) num; i > 0; i--) {
oop cur = ovflw_stk->pop();
assert(cur != NULL, "Counted wrong?");
*** 3888,3929 ****
// task.
pst->all_tasks_completed();
}
class Par_ConcMarkingClosure: public OopClosure {
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
OopTaskQueue* _work_queue;
!
public:
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
_collector(collector),
_span(_collector->_span),
_work_queue(work_queue),
_bit_map(bit_map),
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
!
! void do_oop(oop* p);
void trim_queue(size_t max);
void handle_stack_overflow(HeapWord* lost);
};
// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
! void Par_ConcMarkingClosure::do_oop(oop* p) {
! oop this_oop = *p;
! assert(this_oop->is_oop_or_null(true),
! "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)this_oop;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
// If we manage to "claim" the object, by being the
--- 3949,3990 ----
// task.
pst->all_tasks_completed();
}
class Par_ConcMarkingClosure: public OopClosure {
+ private:
CMSCollector* _collector;
MemRegion _span;
CMSBitMap* _bit_map;
CMSMarkStack* _overflow_stack;
CMSMarkStack* _revisit_stack; // XXXXXX Check proper use
OopTaskQueue* _work_queue;
! protected:
! DO_OOP_WORK_DEFN
public:
Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
_collector(collector),
_span(_collector->_span),
_work_queue(work_queue),
_bit_map(bit_map),
_overflow_stack(overflow_stack) { } // need to initialize revisit stack etc.
! virtual void do_oop(oop* p);
! virtual void do_oop(narrowOop* p);
void trim_queue(size_t max);
void handle_stack_overflow(HeapWord* lost);
};
// Grey object scanning during work stealing phase --
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
! void Par_ConcMarkingClosure::do_oop(oop obj) {
! assert(obj->is_oop_or_null(true), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
// If we manage to "claim" the object, by being the
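The virtual do_oop(oop*) / do_oop(narrowOop*) pair added above, together with DO_OOP_WORK_DEFN, follows the usual compressed-oops closure pattern: both overloads funnel into one templated helper that loads and decodes the reference, then hands the resulting oop to the common grey-object logic. A sketch of what such a helper typically looks like (an assumption about the macro's expansion, not a quote of it):

    template <class T> void do_oop_work(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);   // raw oop or narrowOop, depending on T
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        do_oop(obj);                            // shared marking/push logic
      }
    }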
*** 3938,3948 ****
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
! !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _overflow_stack->capacity());
}
--- 3999,4009 ----
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
! !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _overflow_stack->capacity());
}
*** 3955,3964 ****
--- 4016,4028 ----
}
} // Else, some other thread got there first
}
}
+ void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+ void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
+
void Par_ConcMarkingClosure::trim_queue(size_t max) {
while (_work_queue->size() > max) {
oop new_oop;
if (_work_queue->pop_local(new_oop)) {
assert(new_oop->is_oop(), "Should be an oop");
*** 4330,4343 ****
if (clean_refs) {
ReferenceProcessor* rp = ref_processor();
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
! &_markStack);
CMSDrainMarkingStackClosure complete_trace(this,
_span, &_markBitMap, &_markStack,
! &keep_alive);
// We don't want this step to interfere with a young
// collection because we don't want to take CPU
// or memory bandwidth away from the young GC threads
// (which may be as many as there are CPUs).
--- 4394,4407 ----
if (clean_refs) {
ReferenceProcessor* rp = ref_processor();
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
! &_markStack, true /* preclean */);
CMSDrainMarkingStackClosure complete_trace(this,
_span, &_markBitMap, &_markStack,
! &keep_alive, true /* preclean */);
// We don't want this step to interfere with a young
// collection because we don't want to take CPU
// or memory bandwidth away from the young GC threads
// (which may be as many as there are CPUs).
*** 4532,4546 ****
// We'll scan the cards in the dirty region (with periodic
// yields for foreground GC as needed).
if (!dirtyRegion.is_empty()) {
assert(numDirtyCards > 0, "consistency check");
HeapWord* stop_point = NULL;
- {
stopTimer();
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
bitMapLock());
startTimer();
verify_work_stacks_empty();
verify_overflow_empty();
sample_eden();
stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
--- 4596,4610 ----
// We'll scan the cards in the dirty region (with periodic
// yields for foreground GC as needed).
if (!dirtyRegion.is_empty()) {
assert(numDirtyCards > 0, "consistency check");
HeapWord* stop_point = NULL;
stopTimer();
CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
bitMapLock());
startTimer();
+ {
verify_work_stacks_empty();
verify_overflow_empty();
sample_eden();
stop_point =
gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
*** 4553,4566 ****
// cards. We'll either restart at the next block boundary or
// abort the preclean.
assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
(_collectorState == AbortablePreclean && should_abort_preclean()),
"Unparsable objects should only be in perm gen.");
-
- stopTimer();
- CMSTokenSyncWithLocks ts(true, bitMapLock());
- startTimer();
_modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
if (should_abort_preclean()) {
break; // out of preclean loop
} else {
// Compute the next address at which preclean should pick up;
--- 4617,4626 ----
*** 4612,4623 ****
stopTimer();
CMSTokenSync x(true); // is cms thread
startTimer();
sample_eden();
// Get and clear dirty region from card table
! dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
! MemRegion(nextAddr, endAddr));
assert(dirtyRegion.start() >= nextAddr,
"returned region inconsistent?");
}
lastAddr = dirtyRegion.end();
numDirtyCards =
--- 4672,4686 ----
stopTimer();
CMSTokenSync x(true); // is cms thread
startTimer();
sample_eden();
// Get and clear dirty region from card table
! dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
! MemRegion(nextAddr, endAddr),
! true,
! CardTableModRefBS::precleaned_card_val());
!
assert(dirtyRegion.start() >= nextAddr,
"returned region inconsistent?");
}
lastAddr = dirtyRegion.end();
numDirtyCards =
*** 4721,4731 ****
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
! if (cms_should_unload_classes()) {
CodeCache::gc_prologue();
}
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
--- 4784,4794 ----
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
! if (should_unload_classes()) {
CodeCache::gc_prologue();
}
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
*** 4781,4811 ****
refProcessingWork(asynch, clear_all_soft_refs);
}
verify_work_stacks_empty();
verify_overflow_empty();
! if (cms_should_unload_classes()) {
CodeCache::gc_epilogue();
}
// If we encountered any (marking stack / work queue) overflow
// events during the current CMS cycle, take appropriate
// remedial measures, where possible, so as to try and avoid
// recurrence of that condition.
assert(_markStack.isEmpty(), "No grey objects");
size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
! _ser_kac_ovflw;
if (ser_ovflw > 0) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Marking stack overflow (benign) "
! "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
_ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
! _ser_kac_ovflw);
}
_markStack.expand();
_ser_pmc_remark_ovflw = 0;
_ser_pmc_preclean_ovflw = 0;
_ser_kac_ovflw = 0;
}
if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Work queue overflow (benign) "
--- 4844,4876 ----
refProcessingWork(asynch, clear_all_soft_refs);
}
verify_work_stacks_empty();
verify_overflow_empty();
! if (should_unload_classes()) {
CodeCache::gc_epilogue();
}
// If we encountered any (marking stack / work queue) overflow
// events during the current CMS cycle, take appropriate
// remedial measures, where possible, so as to try and avoid
// recurrence of that condition.
assert(_markStack.isEmpty(), "No grey objects");
size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
! _ser_kac_ovflw + _ser_kac_preclean_ovflw;
if (ser_ovflw > 0) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Marking stack overflow (benign) "
! "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
! ", kac_preclean="SIZE_FORMAT")",
_ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
! _ser_kac_ovflw, _ser_kac_preclean_ovflw);
}
_markStack.expand();
_ser_pmc_remark_ovflw = 0;
_ser_pmc_preclean_ovflw = 0;
+ _ser_kac_preclean_ovflw = 0;
_ser_kac_ovflw = 0;
}
if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Work queue overflow (benign) "
*** 5130,5146 ****
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
CMSBitMap* bm = &(_collector->_markBitMap);
- size_t num_from_overflow_list =
- MIN2((size_t)work_q->max_elems()/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
while (true) {
// Completely finish any left over work from (an) earlier round(s)
cl->trim_queue(0);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
// found something in global overflow list;
// not yet ready to go stealing work from others.
--- 5195,5210 ----
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
CMSBitMap* bm = &(_collector->_markBitMap);
while (true) {
// Completely finish any left over work from (an) earlier round(s)
cl->trim_queue(0);
+ size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
+ (size_t)ParGCDesiredObjsFromOverflowList);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
// found something in global overflow list;
// not yet ready to go stealing work from others.
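Moving the num_from_overflow_list computation inside the loop, and basing it on (max_elems() - size()) rather than max_elems(), bounds each grab by the queue's headroom at the time of the grab. A worked example with made-up numbers: for a queue with max_elems() == 16384 currently holding 12000 entries and ParGCDesiredObjsFromOverflowList == 20, the bound is MIN2((16384 - 12000)/4, 20) == MIN2(1096, 20) == 20; if the queue were nearly full, say 16380 entries, the bound drops to MIN2(1, 20) == 1, so the transfer cannot overflow the work queue.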
*** 5381,5392 ****
NULL, // space is set further below
&_markBitMap, &_markStack, &_revisitStack,
&mrias_cl);
{
TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
! // Iterate over the dirty cards, marking them precleaned, and
! // setting the corresponding bits in the mod union table.
{
ModUnionClosure modUnionClosure(&_modUnionTable);
_ct->ct_bs()->dirty_card_iterate(
_cmsGen->used_region(),
&modUnionClosure);
--- 5445,5456 ----
NULL, // space is set further below
&_markBitMap, &_markStack, &_revisitStack,
&mrias_cl);
{
TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
! // Iterate over the dirty cards, setting the corresponding bits in the
! // mod union table.
{
ModUnionClosure modUnionClosure(&_modUnionTable);
_ct->ct_bs()->dirty_card_iterate(
_cmsGen->used_region(),
&modUnionClosure);
*** 5482,5491 ****
--- 5546,5556 ----
_term(total_workers, task_queues)
{
assert(_collector->_span.equals(_span) && !_span.is_empty(),
"Inconsistency in _span");
}
+
OopTaskQueueSet* task_queues() { return _task_queues; }
OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
ParallelTaskTerminator* terminator() { return &_term; }
*** 5547,5563 ****
CMSParKeepAliveClosure* keep_alive,
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
- size_t num_from_overflow_list =
- MIN2((size_t)work_q->max_elems()/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
while (true) {
// Completely finish any left over work from (an) earlier round(s)
drain->trim_queue(0);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
// Found something in global overflow list;
// not yet ready to go stealing work from others.
--- 5612,5627 ----
CMSParKeepAliveClosure* keep_alive,
int* seed) {
OopTaskQueue* work_q = work_queue(i);
NOT_PRODUCT(int num_steals = 0;)
oop obj_to_scan;
while (true) {
// Completely finish any left over work from (an) earlier round(s)
drain->trim_queue(0);
+ size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
+ (size_t)ParGCDesiredObjsFromOverflowList);
// Now check if there's any work in the overflow list
if (_collector->par_take_from_overflow_list(num_from_overflow_list,
work_q)) {
// Found something in global overflow list;
// not yet ready to go stealing work from others.
*** 5613,5664 ****
void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
ResourceMark rm;
HandleMark hm;
- ReferencePolicy* soft_ref_policy;
! assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
// Process weak references.
! if (clear_all_soft_refs) {
! soft_ref_policy = new AlwaysClearPolicy();
! } else {
! #ifdef COMPILER2
! soft_ref_policy = new LRUMaxHeapPolicy();
! #else
! soft_ref_policy = new LRUCurrentHeapPolicy();
! #endif // COMPILER2
! }
verify_work_stacks_empty();
- ReferenceProcessor* rp = ref_processor();
- assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
! &_markStack);
CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
_span, &_markBitMap, &_markStack,
! &cmsKeepAliveClosure);
{
TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
if (rp->processing_is_mt()) {
CMSRefProcTaskExecutor task_executor(*this);
! rp->process_discovered_references(soft_ref_policy,
! &_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
&task_executor);
} else {
! rp->process_discovered_references(soft_ref_policy,
! &_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
NULL);
}
verify_work_stacks_empty();
}
! if (cms_should_unload_classes()) {
{
TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
// Follow SystemDictionary roots and unload classes
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
--- 5677,5717 ----
void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
ResourceMark rm;
HandleMark hm;
! ReferenceProcessor* rp = ref_processor();
! assert(rp->span().equals(_span), "Spans should be equal");
! assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
// Process weak references.
! rp->setup_policy(clear_all_soft_refs);
verify_work_stacks_empty();
CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
! &_markStack, false /* !preclean */);
CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
_span, &_markBitMap, &_markStack,
! &cmsKeepAliveClosure, false /* !preclean */);
{
TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
if (rp->processing_is_mt()) {
CMSRefProcTaskExecutor task_executor(*this);
! rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
&task_executor);
} else {
! rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
NULL);
}
verify_work_stacks_empty();
}
! if (should_unload_classes()) {
{
TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
// Follow SystemDictionary roots and unload classes
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
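The inline soft-reference policy selection removed above is presumably now centralized in ReferenceProcessor::setup_policy(), which this code calls both here and earlier, before the collection cycle starts. Based only on the removed lines, a sketch of the choice that call has to make (every name other than setup_policy itself is an assumption):

    void ReferenceProcessor::setup_policy(bool clear_all_soft_refs) {
      // AlwaysClearPolicy when asked to clear all soft refs, otherwise an LRU
      // policy (LRUMaxHeapPolicy under COMPILER2, else LRUCurrentHeapPolicy).
      _current_soft_ref_policy = clear_all_soft_refs ? _always_clear_policy
                                                     : _default_policy;
    }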
*** 5757,5775 ****
// PermGen verification support: If perm gen sweeping is disabled in
// this cycle, we preserve the perm gen object "deadness" information
// in the perm_gen_verify_bit_map. In order to do that we traverse
// all blocks in perm gen and mark all dead objects.
! if (verifying() && !cms_should_unload_classes()) {
! CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
! bitMapLock());
assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
"Should have already been allocated");
MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
markBitMap(), perm_gen_verify_bit_map());
_permGen->cmsSpace()->blk_iterate(&mdo);
}
if (asynch) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
// First sweep the old gen then the perm gen
--- 5810,5834 ----
// PermGen verification support: If perm gen sweeping is disabled in
// this cycle, we preserve the perm gen object "deadness" information
// in the perm_gen_verify_bit_map. In order to do that we traverse
// all blocks in perm gen and mark all dead objects.
! if (verifying() && !should_unload_classes()) {
assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
"Should have already been allocated");
MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
markBitMap(), perm_gen_verify_bit_map());
+ if (asynch) {
+ CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
+ bitMapLock());
+ _permGen->cmsSpace()->blk_iterate(&mdo);
+ } else {
+ // In the case of synchronous sweep, we already have
+ // the requisite locks/tokens.
_permGen->cmsSpace()->blk_iterate(&mdo);
}
+ }
if (asynch) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
// First sweep the old gen then the perm gen
*** 5778,5788 ****
bitMapLock());
sweepWork(_cmsGen, asynch);
}
// Now repeat for perm gen
! if (cms_should_unload_classes()) {
CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
bitMapLock());
sweepWork(_permGen, asynch);
}
--- 5837,5847 ----
bitMapLock());
sweepWork(_cmsGen, asynch);
}
// Now repeat for perm gen
! if (should_unload_classes()) {
CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
bitMapLock());
sweepWork(_permGen, asynch);
}
*** 5800,5810 ****
}
} else {
// already have needed locks
sweepWork(_cmsGen, asynch);
! if (cms_should_unload_classes()) {
sweepWork(_permGen, asynch);
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
--- 5859,5869 ----
}
} else {
// already have needed locks
sweepWork(_cmsGen, asynch);
! if (should_unload_classes()) {
sweepWork(_permGen, asynch);
}
// Update heap occupancy information which is used as
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();
*** 5962,5971 ****
--- 6021,6035 ----
// destructor; so, do not remove this scope, else the
// end-of-sweep-census below will be off by a little bit.
}
gen->cmsSpace()->sweep_completed();
gen->cmsSpace()->endSweepFLCensus(sweepCount());
+ if (should_unload_classes()) { // unloaded classes this cycle,
+ _concurrent_cycles_since_last_unload = 0; // ... reset count
+ } else { // did not unload classes,
+ _concurrent_cycles_since_last_unload++; // ... increment count
+ }
}
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
*** 6120,6130 ****
}
HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
size_t sz = 0;
oop p = (oop)addr;
! if (p->klass() != NULL && p->is_parsable()) {
sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
} else {
sz = block_size_using_printezis_bits(addr);
}
assert(sz > 0, "size must be nonzero");
--- 6184,6194 ----
}
HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
size_t sz = 0;
oop p = (oop)addr;
! if (p->klass_or_null() != NULL && p->is_parsable()) {
sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
} else {
sz = block_size_using_printezis_bits(addr);
}
assert(sz > 0, "size must be nonzero");
*** 6142,6152 ****
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call CMSBitMap::allocate()
// further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
! _bm(NULL,0),
_shifter(shifter),
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
{
_bmStartWord = 0;
_bmWordSize = 0;
--- 6206,6216 ----
// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call CMSBitMap::allocate()
// further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
! _bm(),
_shifter(shifter),
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
{
_bmStartWord = 0;
_bmWordSize = 0;
*** 6167,6177 ****
warning("CMS bit map backing store failure");
return false;
}
assert(_virtual_space.committed_size() == brs.size(),
"didn't reserve backing store for all of CMS bit map?");
! _bm.set_map((uintptr_t*)_virtual_space.low());
assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
_bmWordSize, "inconsistency in bit map sizing");
_bm.set_size(_bmWordSize >> _shifter);
// bm.clear(); // can we rely on getting zero'd memory? verify below
--- 6231,6241 ----
warning("CMS bit map backing store failure");
return false;
}
assert(_virtual_space.committed_size() == brs.size(),
"didn't reserve backing store for all of CMS bit map?");
! _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
_bmWordSize, "inconsistency in bit map sizing");
_bm.set_size(_bmWordSize >> _shifter);
// bm.clear(); // can we rely on getting zero'd memory? verify below
*** 6322,6344 ****
{
assert(_ref_processor == NULL, "deliberately left NULL");
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
! void MarkRefsIntoClosure::do_oop(oop* p) {
// if p points into _span, then mark corresponding bit in _markBitMap
! oop thisOop = *p;
! if (thisOop != NULL) {
! assert(thisOop->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)thisOop;
if (_span.contains(addr)) {
// this should be made more efficient
_bitMap->mark(addr);
}
- }
}
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
bool should_do_nmethods):
_span(span),
--- 6386,6408 ----
{
assert(_ref_processor == NULL, "deliberately left NULL");
assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
! void MarkRefsIntoClosure::do_oop(oop obj) {
// if obj points into _span, then mark corresponding bit in _markBitMap
! assert(obj->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr)) {
// this should be made more efficient
_bitMap->mark(addr);
}
}
+ void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+ void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
+
// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
bool should_do_nmethods):
_span(span),
*** 6347,6373 ****
_should_do_nmethods(should_do_nmethods) {
assert(_ref_processor == NULL, "deliberately left NULL");
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
! void MarkRefsIntoVerifyClosure::do_oop(oop* p) {
// if p points into _span, then mark corresponding bit in _markBitMap
! oop this_oop = *p;
! if (this_oop != NULL) {
! assert(this_oop->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr)) {
_verification_bm->mark(addr);
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
! gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
fatal("... aborting");
}
}
- }
}
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////
MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
--- 6411,6437 ----
_should_do_nmethods(should_do_nmethods) {
assert(_ref_processor == NULL, "deliberately left NULL");
assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
! void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
// if obj points into _span, then mark corresponding bit in _markBitMap
! assert(obj->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr)) {
_verification_bm->mark(addr);
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
! gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
fatal("... aborting");
}
}
}
+ void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+ void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
+
//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////
MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
*** 6398,6422 ****
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
! void MarkRefsIntoAndScanClosure::do_oop(oop* p) {
! oop this_oop = *p;
! if (this_oop != NULL) {
! assert(this_oop->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)this_oop;
! assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
! assert(_collector->overflow_list_is_empty(), "should be empty");
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object is now grey)
_bit_map->mark(addr);
// push on marking stack (stack should be empty), and drain the
// stack by applying this closure to the oops in the oops popped
// from the stack (i.e. blacken the grey objects)
! bool res = _mark_stack->push(this_oop);
assert(res, "Should have space to push on empty stack");
do {
oop new_oop = _mark_stack->pop();
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
assert(new_oop->is_parsable(), "Found unparsable oop");
--- 6462,6486 ----
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
! void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
! if (obj != NULL) {
! assert(obj->is_oop(), "expected an oop");
! HeapWord* addr = (HeapWord*)obj;
! assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
! assert(_collector->overflow_list_is_empty(),
! "overflow list should be empty");
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object is now grey)
_bit_map->mark(addr);
// push on marking stack (stack should be empty), and drain the
// stack by applying this closure to the oops in the oops popped
// from the stack (i.e. blacken the grey objects)
! bool res = _mark_stack->push(obj);
assert(res, "Should have space to push on empty stack");
do {
oop new_oop = _mark_stack->pop();
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
assert(new_oop->is_parsable(), "Found unparsable oop");
*** 6448,6457 ****
--- 6512,6524 ----
assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
"All preserved marks should have been restored above");
}
}
+ void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
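For readers following the grey/black vocabulary in the comments above, a self-contained illustration of the mark-push-drain discipline this closure implements, using hypothetical Node/std types rather than HotSpot code (white = unmarked, grey = marked and still on the stack, black = popped with all references scanned):

    #include <stddef.h>
    #include <set>
    #include <stack>
    #include <vector>

    struct Node { std::vector<Node*> refs; };   // hypothetical heap object

    void mark_and_drain(Node* root, std::set<Node*>& marked) {
      if (root == NULL || marked.count(root) > 0) return;
      std::stack<Node*> grey;
      marked.insert(root);                      // white -> grey
      grey.push(root);
      while (!grey.empty()) {                   // eager drainage
        Node* obj = grey.top();
        grey.pop();                             // obj turns black below
        for (size_t i = 0; i < obj->refs.size(); ++i) {
          Node* ref = obj->refs[i];
          if (ref != NULL && marked.count(ref) == 0) {
            marked.insert(ref);                 // white -> grey
            grey.push(ref);
          }
        }
      }                                         // stack empty again on return
    }

The real closure additionally restricts itself to oops within _span and uses the CMS marking bit map as the 'marked' set.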
void MarkRefsIntoAndScanClosure::do_yield_work() {
assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"CMS thread should hold CMS token");
assert_lock_strong(_freelistLock);
assert_lock_strong(_bit_map->lock());
*** 6466,6478 ****
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
! for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
! !CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
--- 6533,6547 ----
_collector->incrementYields();
}
_collector->icms_wait();
// See the comment in coordinator_yield()
! for (unsigned i = 0;
! i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
! !CMSCollector::foregroundGCIsActive();
! ++i) {
os::sleep(Thread::current(), 1, false);
ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
*** 6505,6521 ****
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
! void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) {
! oop this_oop = *p;
! if (this_oop != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
! assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop");
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object will become grey):
// It is possible for several threads to be
// trying to "claim" this object concurrently;
--- 6574,6589 ----
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
! void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
! if (obj != NULL) {
// Ignore mark word because this could be an already marked oop
// that may be chained at the end of the overflow list.
! assert(obj->is_oop(true), "expected an oop");
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// mark bit map (object will become grey):
// It is possible for several threads to be
// trying to "claim" this object concurrently;
*** 6525,6542 ****
if (_bit_map->par_mark(addr)) {
// push on work_queue (which may not be empty), and trim the
// queue to an appropriate length by applying this closure to
// the oops in the oops popped from the stack (i.e. blacken the
// grey objects)
! bool res = _work_queue->push(this_oop);
assert(res, "Low water mark should be less than capacity?");
trim_queue(_low_water_mark);
} // Else, another thread claimed the object
}
}
}
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
oop p, MemRegion mr) {
--- 6593,6613 ----
if (_bit_map->par_mark(addr)) {
// push on work_queue (which may not be empty), and trim the
// queue to an appropriate length by applying this closure to
// the oops in the oops popped from the stack (i.e. blacken the
// grey objects)
! bool res = _work_queue->push(obj);
assert(res, "Low water mark should be less than capacity?");
trim_queue(_low_water_mark);
} // Else, another thread claimed the object
}
}
}
+ void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+ void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+
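The "synchronized (via CAS)" remark above is what _bit_map->par_mark(addr) provides in the code just shown: of several threads racing to mark the same address, exactly one observes the 0 -> 1 transition and pushes the object. A self-contained sketch of that claim protocol, using a GCC/Clang builtin purely for illustration (this is not HotSpot's CMSBitMap implementation):

    #include <stdint.h>

    // Returns true for exactly one of the racing callers: the one whose CAS
    // installs the bit. Everyone else sees the bit already set and backs off.
    static bool par_mark_bit(volatile uintptr_t* word, uintptr_t mask) {
      for (;;) {
        uintptr_t old_val = *word;
        if ((old_val & mask) != 0) {
          return false;                         // already claimed elsewhere
        }
        if (__sync_bool_compare_and_swap(word, old_val, old_val | mask)) {
          return true;                          // this thread claimed the bit
        }
        // CAS failed: another bit in the same word changed; retry.
      }
    }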
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
oop p, MemRegion mr) {
*** 6550,6560 ****
// and we have been asked to abort this ongoing preclean cycle.
return 0;
}
if (_bitMap->isMarked(addr)) {
// it's marked; is it potentially uninitialized?
! if (p->klass() != NULL) {
if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
// Signal precleaning to redirty the card since
// the klass pointer is already installed.
assert(size == 0, "Initial value");
} else {
--- 6621,6631 ----
// and we have been asked to abort this ongoing preclean cycle.
return 0;
}
if (_bitMap->isMarked(addr)) {
// it's marked; is it potentially uninitialized?
! if (p->klass_or_null() != NULL) {
if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
// Signal precleaning to redirty the card since
// the klass pointer is already installed.
assert(size == 0, "Initial value");
} else {
*** 6563,6577 ****
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
if (p->is_objArray()) {
// objArrays are precisely marked; restrict scanning
// to dirty cards only.
! size = p->oop_iterate(_scanningClosure, mr);
! assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
! "adjustObjectSize should be the identity for array sizes, "
! "which are necessarily larger than minimum object size of "
! "two heap words");
} else {
// A non-array may have been imprecisely marked; we need
// to scan object in its entirety.
size = CompactibleFreeListSpace::adjustObjectSize(
p->oop_iterate(_scanningClosure));
--- 6634,6645 ----
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
if (p->is_objArray()) {
// objArrays are precisely marked; restrict scanning
// to dirty cards only.
! size = CompactibleFreeListSpace::adjustObjectSize(
! p->oop_iterate(_scanningClosure, mr));
} else {
// A non-array may have been imprecisely marked; we need
// to scan object in its entirety.
size = CompactibleFreeListSpace::adjustObjectSize(
p->oop_iterate(_scanningClosure));
*** 6601,6611 ****
// will dirty the card when the klass pointer is installed in the
// object (signalling the completion of initialization).
}
} else {
// Either a not yet marked object or an uninitialized object
! if (p->klass() == NULL || !p->is_parsable()) {
// An uninitialized object, skip to the next card, since
// we may not be able to read its P-bits yet.
assert(size == 0, "Initial value");
} else {
// An object not (yet) reached by marking: we merely need to
--- 6669,6679 ----
// will dirty the card when the klass pointer is installed in the
// object (signalling the completion of initialization).
}
} else {
// Either a not yet marked object or an uninitialized object
! if (p->klass_or_null() == NULL || !p->is_parsable()) {
// An uninitialized object, skip to the next card, since
// we may not be able to read its P-bits yet.
assert(size == 0, "Initial value");
} else {
// An object not (yet) reached by marking: we merely need to
*** 6658,6668 ****
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
HeapWord* addr = (HeapWord*)p;
DEBUG_ONLY(_collector->verify_work_stacks_empty();)
assert(!_span.contains(addr), "we are scanning the survivor spaces");
! assert(p->klass() != NULL, "object should be initialized");
assert(p->is_parsable(), "must be parsable.");
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
// Note that we do not yield while we iterate over
--- 6726,6736 ----
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
HeapWord* addr = (HeapWord*)p;
DEBUG_ONLY(_collector->verify_work_stacks_empty();)
assert(!_span.contains(addr), "we are scanning the survivor spaces");
! assert(p->klass_or_null() != NULL, "object should be initialized");
assert(p->is_parsable(), "must be parsable.");
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
// Note that we do not yield while we iterate over
*** 6801,6814 ****
(intptr_t)_finger, CardTableModRefBS::card_size);
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! void MarkFromRootsClosure::do_bit(size_t offset) {
if (_skipBits > 0) {
_skipBits--;
! return;
}
// convert offset into a HeapWord*
HeapWord* addr = _bitMap->startWord() + offset;
assert(_bitMap->endWord() && addr < _bitMap->endWord(),
"address out of range");
--- 6869,6882 ----
(intptr_t)_finger, CardTableModRefBS::card_size);
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! bool MarkFromRootsClosure::do_bit(size_t offset) {
if (_skipBits > 0) {
_skipBits--;
! return true;
}
// convert offset into a HeapWord*
HeapWord* addr = _bitMap->startWord() + offset;
assert(_bitMap->endWord() && addr < _bitMap->endWord(),
"address out of range");
*** 6816,6826 ****
if (_bitMap->isMarked(addr+1)) {
// this is an allocated but not yet initialized object
assert(_skipBits == 0, "tautology");
_skipBits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
! if (p->klass() == NULL || !p->is_parsable()) {
DEBUG_ONLY(if (!_verifying) {)
// We re-dirty the cards on which this object lies and increase
// the _threshold so that we'll come back to scan this object
// during the preclean or remark phase. (CMSCleanOnEnter)
if (CMSCleanOnEnter) {
--- 6884,6894 ----
if (_bitMap->isMarked(addr+1)) {
// this is an allocated but not yet initialized object
assert(_skipBits == 0, "tautology");
_skipBits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
! if (p->klass_or_null() == NULL || !p->is_parsable()) {
DEBUG_ONLY(if (!_verifying) {)
// We re-dirty the cards on which this object lies and increase
// the _threshold so that we'll come back to scan this object
// during the preclean or remark phase. (CMSCleanOnEnter)
if (CMSCleanOnEnter) {
*** 6836,6855 ****
assert(_threshold <= end_card_addr,
"Because we are just scanning into this object");
if (_threshold < end_card_addr) {
_threshold = end_card_addr;
}
! if (p->klass() != NULL) {
// Redirty the range of cards...
_mut->mark_range(redirty_range);
} // ...else the setting of klass will dirty the card anyway.
}
DEBUG_ONLY(})
! return;
}
}
scanOopsInOop(addr);
}
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
--- 6904,6924 ----
assert(_threshold <= end_card_addr,
"Because we are just scanning into this object");
if (_threshold < end_card_addr) {
_threshold = end_card_addr;
}
! if (p->klass_or_null() != NULL) {
// Redirty the range of cards...
_mut->mark_range(redirty_range);
} // ...else the setting of klass will dirty the card anyway.
}
DEBUG_ONLY(})
! return true;
}
}
scanOopsInOop(addr);
+ return true;
}
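do_bit() now returns bool instead of void; the assumed motivation (consistent with the unconditional "return true" above) is that the bit-map iterator may honour an early abort when a closure returns false. A self-contained sketch of such an iterator, with stand-in types rather than HotSpot's BitMap/BitMapClosure:

    #include <stddef.h>
    #include <vector>

    struct BitClosureSketch { virtual bool do_bit(size_t offset) = 0; };

    // Applies the closure to every set bit in [left, right); stops and
    // reports false as soon as the closure requests termination.
    static bool iterate_sketch(const std::vector<bool>& bits,
                               BitClosureSketch* cl,
                               size_t left, size_t right) {
      for (size_t i = left; i < right && i < bits.size(); ++i) {
        if (bits[i] && !cl->do_bit(i)) {
          return false;                  // closure asked to stop early
        }
      }
      return true;                       // scanned the whole requested range
    }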
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
*** 6886,6902 ****
void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
assert(_bitMap->isMarked(ptr), "expected bit to be set");
assert(_markStack->isEmpty(),
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
! oop this_oop = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
! assert(this_oop->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = ptr + this_oop->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
--- 6955,6971 ----
void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
assert(_bitMap->isMarked(ptr), "expected bit to be set");
assert(_markStack->isEmpty(),
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
! oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
! assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
*** 6938,6948 ****
// the stack below.
PushOrMarkClosure pushOrMarkClosure(_collector,
_span, _bitMap, _markStack,
_revisitStack,
_finger, this);
! bool res = _markStack->push(this_oop);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_markStack->isEmpty()) {
oop new_oop = _markStack->pop();
// Skip verifying header mark word below because we are
// running concurrent with mutators.
--- 7007,7017 ----
// the stack below.
PushOrMarkClosure pushOrMarkClosure(_collector,
_span, _bitMap, _markStack,
_revisitStack,
_finger, this);
! bool res = _markStack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_markStack->isEmpty()) {
oop new_oop = _markStack->pop();
// Skip verifying header mark word below because we are
// running concurrent with mutators.
*** 6979,6992 ****
assert(_span.contains(_finger), "Out of bounds _finger?");
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! void Par_MarkFromRootsClosure::do_bit(size_t offset) {
if (_skip_bits > 0) {
_skip_bits--;
! return;
}
// convert offset into a HeapWord*
HeapWord* addr = _bit_map->startWord() + offset;
assert(_bit_map->endWord() && addr < _bit_map->endWord(),
"address out of range");
--- 7048,7061 ----
assert(_span.contains(_finger), "Out of bounds _finger?");
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
if (_skip_bits > 0) {
_skip_bits--;
! return true;
}
// convert offset into a HeapWord*
HeapWord* addr = _bit_map->startWord() + offset;
assert(_bit_map->endWord() && addr < _bit_map->endWord(),
"address out of range");
*** 6994,7026 ****
if (_bit_map->isMarked(addr+1)) {
// this is an allocated object that might not yet be initialized
assert(_skip_bits == 0, "tautology");
_skip_bits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
! if (p->klass() == NULL || !p->is_parsable()) {
// in the case of Clean-on-Enter optimization, redirty card
// and avoid clearing card by increasing the threshold.
! return;
}
}
scan_oops_in_oop(addr);
}
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
assert(_bit_map->isMarked(ptr), "expected bit to be set");
// Should we assert that our work queue is empty or
// below some drain limit?
assert(_work_queue->size() == 0,
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
! oop this_oop = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
! assert(this_oop->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = ptr + this_oop->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
--- 7063,7096 ----
if (_bit_map->isMarked(addr+1)) {
// this is an allocated object that might not yet be initialized
assert(_skip_bits == 0, "tautology");
_skip_bits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
! if (p->klass_or_null() == NULL || !p->is_parsable()) {
// in the case of Clean-on-Enter optimization, redirty card
// and avoid clearing card by increasing the threshold.
! return true;
}
}
scan_oops_in_oop(addr);
+ return true;
}
void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
assert(_bit_map->isMarked(ptr), "expected bit to be set");
// Should we assert that our work queue is empty or
// below some drain limit?
assert(_work_queue->size() == 0,
"should drain stack to limit stack usage");
// convert ptr to an oop preparatory to scanning
! oop obj = oop(ptr);
// Ignore mark word in verification below, since we
// may be running concurrent with mutators.
! assert(obj->is_oop(true), "should be an oop");
assert(_finger <= ptr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
// the marking phase (especially if running iCMS). During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
*** 7064,7074 ****
_work_queue,
_overflow_stack,
_revisit_stack,
_finger,
gfa, this);
! bool res = _work_queue->push(this_oop); // overflow could occur here
assert(res, "Will hold once we use workqueues");
while (true) {
oop new_oop;
if (!_work_queue->pop_local(new_oop)) {
// We emptied our work_queue; check if there's stuff that can
--- 7134,7144 ----
_work_queue,
_overflow_stack,
_revisit_stack,
_finger,
gfa, this);
! bool res = _work_queue->push(obj); // overflow could occur here
assert(res, "Will hold once we use workqueues");
while (true) {
oop new_oop;
if (!_work_queue->pop_local(new_oop)) {
// We emptied our work_queue; check if there's stuff that can
*** 7123,7160 ****
_finger = addr;
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
// convert offset into a HeapWord*
HeapWord* addr = _verification_bm->startWord() + offset;
assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
"address out of range");
assert(_verification_bm->isMarked(addr), "tautology");
assert(_cms_bm->isMarked(addr), "tautology");
assert(_mark_stack->isEmpty(),
"should drain stack to limit stack usage");
// convert addr to an oop preparatory to scanning
! oop this_oop = oop(addr);
! assert(this_oop->is_oop(), "should be an oop");
assert(_finger <= addr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = addr + this_oop->size();
assert(_finger > addr, "we just incremented it above");
// Note: the finger doesn't advance while we drain
// the stack below.
! bool res = _mark_stack->push(this_oop);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_mark_stack->isEmpty()) {
oop new_oop = _mark_stack->pop();
assert(new_oop->is_oop(), "Oops! expected to pop an oop");
// now scan this oop's oops
new_oop->oop_iterate(&_pam_verify_closure);
}
assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
}
PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
CMSCollector* collector, MemRegion span,
CMSBitMap* verification_bm, CMSBitMap* cms_bm,
--- 7193,7231 ----
_finger = addr;
}
// Should revisit to see if this should be restructured for
// greater efficiency.
! bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
// convert offset into a HeapWord*
HeapWord* addr = _verification_bm->startWord() + offset;
assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
"address out of range");
assert(_verification_bm->isMarked(addr), "tautology");
assert(_cms_bm->isMarked(addr), "tautology");
assert(_mark_stack->isEmpty(),
"should drain stack to limit stack usage");
// convert addr to an oop preparatory to scanning
! oop obj = oop(addr);
! assert(obj->is_oop(), "should be an oop");
assert(_finger <= addr, "_finger runneth ahead");
// advance the finger to right end of this object
! _finger = addr + obj->size();
assert(_finger > addr, "we just incremented it above");
// Note: the finger doesn't advance while we drain
// the stack below.
! bool res = _mark_stack->push(obj);
assert(res, "Empty non-zero size stack should have space for single push");
while (!_mark_stack->isEmpty()) {
oop new_oop = _mark_stack->pop();
assert(new_oop->is_oop(), "Oops! expected to pop an oop");
// now scan this oop's oops
new_oop->oop_iterate(&_pam_verify_closure);
}
assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
+ return true;
}
PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
CMSCollector* collector, MemRegion span,
CMSBitMap* verification_bm, CMSBitMap* cms_bm,
*** 7165,7174 ****
--- 7236,7247 ----
_verification_bm(verification_bm),
_cms_bm(cms_bm),
_mark_stack(mark_stack)
{ }
+ void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
+ void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
*** 7177,7200 ****
_collector->lower_restart_addr(ra);
_mark_stack->reset(); // discard stack contents
_mark_stack->expand(); // expand the stack if possible
}
! void PushAndMarkVerifyClosure::do_oop(oop* p) {
! oop this_oop = *p;
! assert(this_oop->is_oop_or_null(), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_verification_bm->mark(addr); // now grey
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
! gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
fatal("... aborting");
}
! if (!_mark_stack->push(this_oop)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _mark_stack->capacity());
}
assert(_mark_stack->isFull(), "Else push should have succeeded");
--- 7250,7273 ----
_collector->lower_restart_addr(ra);
_mark_stack->reset(); // discard stack contents
_mark_stack->expand(); // expand the stack if possible
}
! void PushAndMarkVerifyClosure::do_oop(oop obj) {
! assert(obj->is_oop_or_null(), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_verification_bm->mark(addr); // now grey
if (!_cms_bm->isMarked(addr)) {
oop(addr)->print();
! gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
! addr);
fatal("... aborting");
}
! if (!_mark_stack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _mark_stack->capacity());
}
assert(_mark_stack->isFull(), "Else push should have succeeded");
*** 7217,7227 ****
_bitMap(bitMap),
_markStack(markStack),
_revisitStack(revisitStack),
_finger(finger),
_parent(parent),
! _should_remember_klasses(collector->cms_should_unload_classes())
{ }
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
--- 7290,7300 ----
_bitMap(bitMap),
_markStack(markStack),
_revisitStack(revisitStack),
_finger(finger),
_parent(parent),
! _should_remember_klasses(collector->should_unload_classes())
{ }
Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
MemRegion span,
CMSBitMap* bit_map,
*** 7240,7250 ****
_overflow_stack(overflow_stack),
_revisit_stack(revisit_stack),
_finger(finger),
_global_finger_addr(global_finger_addr),
_parent(parent),
! _should_remember_klasses(collector->cms_should_unload_classes())
{ }
// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
void CMSCollector::lower_restart_addr(HeapWord* low) {
--- 7313,7323 ----
_overflow_stack(overflow_stack),
_revisit_stack(revisit_stack),
_finger(finger),
_global_finger_addr(global_finger_addr),
_parent(parent),
! _should_remember_klasses(collector->should_unload_classes())
{ }
// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
void CMSCollector::lower_restart_addr(HeapWord* low) {
*** 7280,7295 ****
_collector->lower_restart_addr(ra);
_overflow_stack->reset(); // discard stack contents
_overflow_stack->expand(); // expand the stack if possible
}
!
! void PushOrMarkClosure::do_oop(oop* p) {
! oop thisOop = *p;
// Ignore mark word because we are running concurrent with mutators.
! assert(thisOop->is_oop_or_null(true), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)thisOop;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_bitMap->mark(addr); // now grey
if (addr < _finger) {
// the bit map iteration has already either passed, or
--- 7353,7366 ----
_collector->lower_restart_addr(ra);
_overflow_stack->reset(); // discard stack contents
_overflow_stack->expand(); // expand the stack if possible
}
! void PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
! assert(obj->is_oop_or_null(true), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
_bitMap->mark(addr); // now grey
if (addr < _finger) {
// the bit map iteration has already either passed, or
*** 7301,7311 ****
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _markStack->capacity());
}
assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
--- 7372,7382 ----
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _markStack->capacity());
}
assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
*** 7317,7331 ****
// bit map
do_yield_check();
}
}
! void Par_PushOrMarkClosure::do_oop(oop* p) {
! oop this_oop = *p;
// Ignore mark word because we are running concurrent with mutators.
! assert(this_oop->is_oop_or_null(true), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)this_oop;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
// We read the global_finger (volatile read) strictly after marking oop
bool res = _bit_map->par_mark(addr); // now grey
volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
--- 7388,7404 ----
// bit map
do_yield_check();
}
}
! void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
! void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
!
! void Par_PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
! assert(obj->is_oop_or_null(true), "expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black
// We read the global_finger (volatile read) strictly after marking oop
bool res = _bit_map->par_mark(addr); // now grey
volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
*** 7350,7360 ****
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
! !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _overflow_stack->capacity());
}
--- 7423,7433 ----
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow ||
! !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
// stack overflow
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
SIZE_FORMAT, _overflow_stack->capacity());
}
*** 7367,7376 ****
--- 7440,7451 ----
}
do_yield_check();
}
}
+ void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+ void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
MemRegion span,
ReferenceProcessor* rp,
CMSBitMap* bit_map,
*** 7384,7410 ****
_bit_map(bit_map),
_mod_union_table(mod_union_table),
_mark_stack(mark_stack),
_revisit_stack(revisit_stack),
_concurrent_precleaning(concurrent_precleaning),
! _should_remember_klasses(collector->cms_should_unload_classes())
{
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
! void PushAndMarkClosure::do_oop(oop* p) {
! oop this_oop = *p;
! // Ignore mark word verification. If during concurrent precleaning
// the object monitor may be locked. If during the checkpoint
// phases, the object may already have been reached by a different
// path and may be at the end of the global overflow list (so
// the mark word may be NULL).
! assert(this_oop->is_oop_or_null(true/* ignore mark word */),
"expected an oop or NULL");
! HeapWord* addr = (HeapWord*)this_oop;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
_bit_map->mark(addr); // ... now grey
--- 7459,7484 ----
_bit_map(bit_map),
_mod_union_table(mod_union_table),
_mark_stack(mark_stack),
_revisit_stack(revisit_stack),
_concurrent_precleaning(concurrent_precleaning),
! _should_remember_klasses(collector->should_unload_classes())
{
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
! void PushAndMarkClosure::do_oop(oop obj) {
! // Ignore mark word verification. If during concurrent precleaning,
// the object monitor may be locked. If during the checkpoint
// phases, the object may already have been reached by a different
// path and may be at the end of the global overflow list (so
// the mark word may be NULL).
! assert(obj->is_oop_or_null(true /* ignore mark word */),
"expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
_bit_map->mark(addr); // ... now grey
*** 7415,7437 ****
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_mark_stack->push(this_oop)) {
if (_concurrent_precleaning) {
// During precleaning we can just dirty the appropriate card(s)
// in the mod union table, thus ensuring that the object remains
// in the grey set and continue. In the case of object arrays
// we need to dirty all of the cards that the object spans,
// since the rescan of object arrays will be limited to the
// dirty cards.
// Note that no one can be interfering with us in this action
// of dirtying the mod union table, so no locking or atomics
// are required.
! if (this_oop->is_objArray()) {
! size_t sz = this_oop->size();
HeapWord* end_card_addr = (HeapWord*)round_to(
(intptr_t)(addr+sz), CardTableModRefBS::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_mod_union_table->mark_range(redirty_range);
--- 7489,7511 ----
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_mark_stack->push(obj)) {
if (_concurrent_precleaning) {
// During precleaning we can just dirty the appropriate card(s)
// in the mod union table, thus ensuring that the object remains
// in the grey set and continue. In the case of object arrays
// we need to dirty all of the cards that the object spans,
// since the rescan of object arrays will be limited to the
// dirty cards.
// Note that no one can be interfering with us in this action
// of dirtying the mod union table, so no locking or atomics
// are required.
! if (obj->is_objArray()) {
! size_t sz = obj->size();
HeapWord* end_card_addr = (HeapWord*)round_to(
(intptr_t)(addr+sz), CardTableModRefBS::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_mod_union_table->mark_range(redirty_range);
*** 7440,7450 ****
}
_collector->_ser_pmc_preclean_ovflw++;
} else {
// During the remark phase, we need to remember this oop
// in the overflow list.
! _collector->push_on_overflow_list(this_oop);
_collector->_ser_pmc_remark_ovflw++;
}
}
}
}
--- 7514,7524 ----
}
_collector->_ser_pmc_preclean_ovflw++;
} else {
// During the remark phase, we need to remember this oop
// in the overflow list.
! _collector->push_on_overflow_list(obj);
_collector->_ser_pmc_remark_ovflw++;
}
}
}
}
*** 7459,7477 ****
_collector(collector),
_span(span),
_bit_map(bit_map),
_work_queue(work_queue),
_revisit_stack(revisit_stack),
! _should_remember_klasses(collector->cms_should_unload_classes())
{
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
// Grey object rescan during second checkpoint phase --
// the parallel version.
! void Par_PushAndMarkClosure::do_oop(oop* p) {
! oop this_oop = *p;
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
// been hijacked for chaining into the overflow stack --
// if this is the last object in the overflow stack then
--- 7533,7553 ----
_collector(collector),
_span(span),
_bit_map(bit_map),
_work_queue(work_queue),
_revisit_stack(revisit_stack),
! _should_remember_klasses(collector->should_unload_classes())
{
assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
+ void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
+ void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
+
// Grey object rescan during second checkpoint phase --
// the parallel version.
! void Par_PushAndMarkClosure::do_oop(oop obj) {
// In the assert below, we ignore the mark word because
// this oop may point to an already visited object that is
// on the overflow stack (in which case the mark word has
// been hijacked for chaining into the overflow stack --
// if this is the last object in the overflow stack then
*** 7479,7491 ****
// have been subsequently popped off the global overflow
// stack, and the mark word possibly restored to the prototypical
// value, by the time we get to examine this failing assert in
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
! assert(this_oop->is_oop_or_null(true),
"expected an oop or NULL");
! HeapWord* addr = (HeapWord*)this_oop;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
// If we manage to "claim" the object, by being the
--- 7555,7567 ----
// have been subsequently popped off the global overflow
// stack, and the mark word possibly restored to the prototypical
// value, by the time we get to examine this failing assert in
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
! assert(obj->is_oop_or_null(true),
"expected an oop or NULL");
! HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked
if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
// a white object ...
// If we manage to "claim" the object, by being the
*** 7499,7516 ****
_collector->par_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_work_queue->push(this_oop)) {
! _collector->par_push_on_overflow_list(this_oop);
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
}
} // Else, some other thread got there first
}
}
void PushAndMarkClosure::remember_klass(Klass* k) {
if (!_revisit_stack->push(oop(k))) {
fatal("Revisit stack overflowed in PushAndMarkClosure");
}
}
--- 7575,7595 ----
_collector->par_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_work_queue->push(obj)) {
! _collector->par_push_on_overflow_list(obj);
_collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
}
} // Else, some other thread got there first
}
}
+ void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+ void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+
void PushAndMarkClosure::remember_klass(Klass* k) {
if (!_revisit_stack->push(oop(k))) {
fatal("Revisit stack overflowed in PushAndMarkClosure");
}
}
*** 7980,7991 ****
size = pointer_delta(nextOneAddr + 1, addr);
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
"alignment problem");
#ifdef DEBUG
! if (oop(addr)->klass() != NULL &&
! ( !_collector->cms_should_unload_classes()
|| oop(addr)->is_parsable())) {
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
assert(size ==
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
--- 8059,8070 ----
size = pointer_delta(nextOneAddr + 1, addr);
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
"alignment problem");
#ifdef DEBUG
! if (oop(addr)->klass_or_null() != NULL &&
! ( !_collector->should_unload_classes()
|| oop(addr)->is_parsable())) {
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
assert(size ==
CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
*** 7993,8004 ****
}
#endif
} else {
// This should be an initialized object that's alive.
! assert(oop(addr)->klass() != NULL &&
! (!_collector->cms_should_unload_classes()
|| oop(addr)->is_parsable()),
"Should be an initialized object");
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
// Verify that the bit map has no bits marked between
--- 8072,8083 ----
}
#endif
} else {
// This should be an initialized object that's alive.
! assert(oop(addr)->klass_or_null() != NULL &&
! (!_collector->should_unload_classes()
|| oop(addr)->is_parsable()),
"Should be an initialized object");
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
// Verify that the bit map has no bits marked between
*** 8200,8212 ****
return addr != NULL &&
(!_span.contains(addr) || _bit_map->isMarked(addr));
}
// CMSKeepAliveClosure: the serial version
! void CMSKeepAliveClosure::do_oop(oop* p) {
! oop this_oop = *p;
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
_bit_map->mark(addr);
bool simulate_overflow = false;
NOT_PRODUCT(
--- 8279,8290 ----
return addr != NULL &&
(!_span.contains(addr) || _bit_map->isMarked(addr));
}
// CMSKeepAliveClosure: the serial version
! void CMSKeepAliveClosure::do_oop(oop obj) {
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
_bit_map->mark(addr);
bool simulate_overflow = false;
NOT_PRODUCT(
*** 8214,8251 ****
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_mark_stack->push(this_oop)) {
! _collector->push_on_overflow_list(this_oop);
_collector->_ser_kac_ovflw++;
}
}
}
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
! void CMSParKeepAliveClosure::do_oop(oop* p) {
! oop this_oop = *p;
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// In general, during recursive tracing, several threads
// may be concurrently getting here; the first one to
// "tag" it, claims it.
if (_bit_map->par_mark(addr)) {
! bool res = _work_queue->push(this_oop);
assert(res, "Low water mark should be much less than capacity");
// Do a recursive trim in the hope that this will keep
// stack usage lower, but leave some oops for potential stealers
trim_queue(_low_water_mark);
} // Else, another thread got there first
}
}
void CMSParKeepAliveClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop new_oop;
if (_work_queue->pop_local(new_oop)) {
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
--- 8292,8355 ----
_collector->simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_mark_stack->push(obj)) {
! if (_concurrent_precleaning) {
! // We dirty the overflown object and let the remark
! // phase deal with it.
! assert(_collector->overflow_list_is_empty(), "Error");
! // In the case of object arrays, we need to dirty all of
! // the cards that the object spans. No locking or atomics
! // are needed since no one else can be mutating the mod union
! // table.
! if (obj->is_objArray()) {
! size_t sz = obj->size();
! HeapWord* end_card_addr =
! (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
! MemRegion redirty_range = MemRegion(addr, end_card_addr);
! assert(!redirty_range.is_empty(), "Arithmetical tautology");
! _collector->_modUnionTable.mark_range(redirty_range);
! } else {
! _collector->_modUnionTable.mark(addr);
! }
! _collector->_ser_kac_preclean_ovflw++;
! } else {
! _collector->push_on_overflow_list(obj);
_collector->_ser_kac_ovflw++;
}
}
+ }
}
+ void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+ void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
+
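A worked example of the redirty-range computation in the precleaning branch above, assuming 8-byte HeapWords and a 512-byte card size (both assumed values, not stated in this hunk). For an objArray at addr = 0x1000 of sz = 70 words (560 bytes):

    addr + sz     = 0x1230
    end_card_addr = round_to(0x1230, 512) = 0x1400
    redirty_range = [0x1000, 0x1400)   // both 512-byte cards the array spans

so the remark phase, which rescans object arrays only on dirty cards, is guaranteed to revisit the whole array.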
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
! void CMSParKeepAliveClosure::do_oop(oop obj) {
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
// In general, during recursive tracing, several threads
// may be concurrently getting here; the first one to
// "tag" it, claims it.
if (_bit_map->par_mark(addr)) {
! bool res = _work_queue->push(obj);
assert(res, "Low water mark should be much less than capacity");
// Do a recursive trim in the hope that this will keep
// stack usage lower, but leave some oops for potential stealers
trim_queue(_low_water_mark);
} // Else, another thread got there first
}
}
+ void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+ void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
+
void CMSParKeepAliveClosure::trim_queue(uint max) {
while (_work_queue->size() > max) {
oop new_oop;
if (_work_queue->pop_local(new_oop)) {
assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
*** 8257,8269 ****
new_oop->oop_iterate(&_mark_and_push);
}
}
}
! void CMSInnerParMarkAndPushClosure::do_oop(oop* p) {
! oop this_oop = *p;
! HeapWord* addr = (HeapWord*)this_oop;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
if (_bit_map->par_mark(addr)) {
bool simulate_overflow = false;
NOT_PRODUCT(
--- 8361,8372 ----
new_oop->oop_iterate(&_mark_and_push);
}
}
}
! void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
! HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) &&
!_bit_map->isMarked(addr)) {
if (_bit_map->par_mark(addr)) {
bool simulate_overflow = false;
NOT_PRODUCT(
*** 8271,8288 ****
_collector->par_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_work_queue->push(this_oop)) {
! _collector->par_push_on_overflow_list(this_oop);
_collector->_par_kac_ovflw++;
}
} // Else another thread got there already
}
}
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
switch (cause) {
--- 8374,8394 ----
_collector->par_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
! if (simulate_overflow || !_work_queue->push(obj)) {
! _collector->par_push_on_overflow_list(obj);
_collector->_par_kac_ovflw++;
}
} // Else another thread got there already
}
}
+ void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+ void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
+
//////////////////////////////////////////////////////////////////
// CMSExpansionCause /////////////////////////////
//////////////////////////////////////////////////////////////////
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
switch (cause) {
*** 8306,8324 ****
}
void CMSDrainMarkingStackClosure::do_void() {
// the max number to take from overflow list at a time
const size_t num = _mark_stack->capacity()/4;
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
_collector->take_from_overflow_list(num, _mark_stack)) {
! oop this_oop = _mark_stack->pop();
! HeapWord* addr = (HeapWord*)this_oop;
assert(_span.contains(addr), "Should be within span");
assert(_bit_map->isMarked(addr), "Should be marked");
! assert(this_oop->is_oop(), "Should be an oop");
! this_oop->oop_iterate(_keep_alive);
}
}
void CMSParDrainMarkingStackClosure::do_void() {
// drain queue
--- 8412,8432 ----
}
void CMSDrainMarkingStackClosure::do_void() {
// the max number to take from overflow list at a time
const size_t num = _mark_stack->capacity()/4;
+ assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
+ "Overflow list should be NULL during concurrent phases");
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
_collector->take_from_overflow_list(num, _mark_stack)) {
! oop obj = _mark_stack->pop();
! HeapWord* addr = (HeapWord*)obj;
assert(_span.contains(addr), "Should be within span");
assert(_bit_map->isMarked(addr), "Should be marked");
! assert(obj->is_oop(), "Should be an oop");
! obj->oop_iterate(_keep_alive);
}
}
void CMSParDrainMarkingStackClosure::do_void() {
// drain queue
*** 8799,8809 ****
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
! size_t num = MIN2((size_t)_mark_stack->capacity()/4,
(size_t)ParGCDesiredObjsFromOverflowList);
bool res = _collector->take_from_overflow_list(num, _mark_stack);
assert(_collector->overflow_list_is_empty() || res,
"If list is not empty, we should have taken something");
--- 8907,8917 ----
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
! size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
bool res = _collector->take_from_overflow_list(num, _mark_stack);
assert(_collector->overflow_list_is_empty() || res,
"If list is not empty, we should have taken something");