src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

*** 1551,1563 ****
  // all clear. If we are assuming the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
         "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();
! assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
         "mod union for klasses should be clear if the baton was passed");
! _ct->klass_rem_set()->clear_mod_union();

  // We must adjust the allocation statistics being maintained
  // in the free list space. We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
--- 1551,1564 ----
  // all clear. If we are assuming the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
         "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();
! assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
         "mod union for klasses should be clear if the baton was passed");
! _ct->cld_rem_set()->clear_mod_union();
!
  // We must adjust the allocation statistics being maintained
  // in the free list space. We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
*** 2023,2033 ****
  // The young collections clear the modified oops state, which tells if
  // there are any modified oops in the class. The remark phase also needs
  // that information. Tell the young collection to save the union of all
  // modified klasses.
  if (duringMarking) {
!   _ct->klass_rem_set()->set_accumulate_modified_oops(true);
  }

  bool registerClosure = duringMarking;

  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
--- 2024,2034 ----
  // The young collections clear the modified oops state, which tells if
  // there are any modified oops in the class. The remark phase also needs
  // that information. Tell the young collection to save the union of all
  // modified klasses.
  if (duringMarking) {
!   _ct->cld_rem_set()->set_accumulate_modified_oops(true);
  }

  bool registerClosure = duringMarking;

  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
*** 2099,2109 ****
    return;
  }

  assert(haveFreelistLocks(), "must have freelist locks");
  assert_lock_strong(bitMapLock());
! _ct->klass_rem_set()->set_accumulate_modified_oops(false);

  _cmsGen->gc_epilogue_work(full);

  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
    // in case sampling was not already enabled, enable it
--- 2100,2110 ----
    return;
  }

  assert(haveFreelistLocks(), "must have freelist locks");
  assert_lock_strong(bitMapLock());
! _ct->cld_rem_set()->set_accumulate_modified_oops(false);

  _cmsGen->gc_epilogue_work(full);

  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
    // in case sampling was not already enabled, enable it
*** 2378,2399 ****
    gch->print_on(&ls);
    fatal("CMS: failed marking verification after remark");
  }
}

! class VerifyKlassOopsKlassClosure : public KlassClosure {
!   class VerifyKlassOopsClosure : public OopClosure {
      CMSBitMap* _bitmap;
     public:
!     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
      void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
      void do_oop(narrowOop* p) { ShouldNotReachHere(); }
    } _oop_closure;
   public:
!   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
!   void do_klass(Klass* k) {
!     k->oops_do(&_oop_closure);
    }
  };

  void CMSCollector::verify_after_remark_work_2() {
    ResourceMark rm;
--- 2379,2400 ----
    gch->print_on(&ls);
    fatal("CMS: failed marking verification after remark");
  }
}

! class VerifyCLDOopsCLDClosure : public CLDClosure {
!   class VerifyCLDOopsClosure : public OopClosure {
      CMSBitMap* _bitmap;
     public:
!     VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
      void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
      void do_oop(narrowOop* p) { ShouldNotReachHere(); }
    } _oop_closure;
   public:
!   VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
!   void do_cld(ClassLoaderData* cld) {
!     cld->oops_do(&_oop_closure, false, false);
    }
  };

  void CMSCollector::verify_after_remark_work_2() {
    ResourceMark rm;
*** 2435,2446 ****
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();

! VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
! ClassLoaderDataGraph::classes_do(&verify_klass_oops);

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
--- 2436,2447 ----
    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  }
  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  verify_work_stacks_empty();

! VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
! ClassLoaderDataGraph::cld_do(&verify_cld_oops);

  // Marking completed -- now verify that each bit marked in
  // verification_mark_bm() is also marked in markBitMap(); flag all
  // errors by printing corresponding objects.
  VerifyMarkedClosure vcl(markBitMap());
*** 2909,2919 ****
  assert(_modUnionTable.isAllClear(),
         "Was cleared in most recent final checkpoint phase"
         " or no bits are set in the gc_prologue before the start of the next "
         "subsequent marking phase.");

! assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  verify_overflow_empty();
--- 2910,2920 ----
  assert(_modUnionTable.isAllClear(),
         "Was cleared in most recent final checkpoint phase"
         " or no bits are set in the gc_prologue before the start of the next "
         "subsequent marking phase.");

! assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");

  // Save the end of the used_region of the constituent generations
  // to be used to limit the extent of sweep in each generation.
  save_sweep_limits();
  verify_overflow_empty();
*** 3846,3856 ****
        cumNumCards += curNumCards;
        break;
      }
    }

! preclean_klasses(&mrias_cl, _cmsGen->freelistLock());

  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  cumNumCards += curNumCards;
  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
                curNumCards, cumNumCards, numIter);
--- 3847,3857 ----
        cumNumCards += curNumCards;
        break;
      }
    }

! preclean_cld(&mrias_cl, _cmsGen->freelistLock());

  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  cumNumCards += curNumCards;
  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
                curNumCards, cumNumCards, numIter);
*** 4065,4098 ****
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

! class PrecleanKlassClosure : public KlassClosure {
!   KlassToOopClosure _cm_klass_closure;
 public:
!   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
!   void do_klass(Klass* k) {
!     if (k->has_accumulated_modified_oops()) {
!       k->clear_accumulated_modified_oops();

!       _cm_klass_closure.do_klass(k);
    }
  }
};

// The freelist lock is needed to prevent asserts, is it really needed?
! void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {

  cl->set_freelistLock(freelistLock);

  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());

  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
! PrecleanKlassClosure preclean_klass_closure(cl);
! ClassLoaderDataGraph::classes_do(&preclean_klass_closure);

  verify_work_stacks_empty();
  verify_overflow_empty();
}
--- 4066,4099 ----
  verify_work_stacks_empty();
  verify_overflow_empty();
  return cumNumDirtyCards;
}

! class PrecleanCLDClosure : public CLDClosure {
!   MetadataAwareOopsInGenClosure* _cm_closure;
 public:
!   PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
!   void do_cld(ClassLoaderData* cld) {
!     if (cld->has_accumulated_modified_oops()) {
!       cld->clear_accumulated_modified_oops();

!       _cm_closure->do_cld(cld);
    }
  }
};

// The freelist lock is needed to prevent asserts, is it really needed?
! void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {

  cl->set_freelistLock(freelistLock);

  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());

  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
! PrecleanCLDClosure preclean_closure(cl);
! ClassLoaderDataGraph::cld_do(&preclean_closure);

  verify_work_stacks_empty();
  verify_overflow_empty();
}
*** 4248,4258 ****
  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(),
         "Should be clear by end of the final marking");
! assert(_ct->klass_rem_set()->mod_union_is_clear(),
         "Should be clear by end of the final marking");
}

void CMSParInitialMarkTask::work(uint worker_id) {
  elapsedTimer _timer;
--- 4249,4259 ----
  // Change under the freelistLocks.
  _collectorState = Sweeping;
  // Call isAllClear() under bitMapLock
  assert(_modUnionTable.isAllClear(),
         "Should be clear by end of the final marking");
! assert(_ct->cld_rem_set()->mod_union_is_clear(),
         "Should be clear by end of the final marking");
}

void CMSParInitialMarkTask::work(uint worker_id) {
  elapsedTimer _timer;
*** 4330,4359 ****
  // ... work stealing for the above
  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};

! class RemarkKlassClosure : public KlassClosure {
!   KlassToOopClosure _cm_klass_closure;
 public:
!   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
!   void do_klass(Klass* k) {
!     // Check if we have modified any oops in the Klass during the concurrent marking.
!     if (k->has_accumulated_modified_oops()) {
!       k->clear_accumulated_modified_oops();

      // We could have transfered the current modified marks to the accumulated marks,
      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
!     } else if (k->has_modified_oops()) {
      // Don't clear anything, this info is needed by the next young collection.
    } else {
!     // No modified oops in the Klass.
      return;
    }

    // The klass has modified fields, need to scan the klass.
!   _cm_klass_closure.do_klass(k);
  }
};

void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
  ParNewGeneration* young_gen = _collector->_young_gen;
--- 4331,4360 ----
  // ... work stealing for the above
  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};

! class RemarkCLDClosure : public CLDClosure {
!   CLDToOopClosure _cm_closure;
 public:
!   RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure) {}
!   void do_cld(ClassLoaderData* cld) {
!     // Check if we have modified any oops in the CLD during the concurrent marking.
!     if (cld->has_accumulated_modified_oops()) {
!       cld->clear_accumulated_modified_oops();

      // We could have transfered the current modified marks to the accumulated marks,
      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
!     } else if (cld->has_modified_oops()) {
      // Don't clear anything, this info is needed by the next young collection.
    } else {
!     // No modified oops in the ClassLoaderData.
      return;
    }

    // The klass has modified fields, need to scan the klass.
!   _cm_closure.do_cld(cld);
  }
};

void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
  ParNewGeneration* young_gen = _collector->_young_gen;
*** 4437,4464 ****
    _timer.stop();
    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
                        worker_id, _timer.seconds());
  }

! // ---------- dirty klass scanning ----------
  if (worker_id == 0) { // Single threaded at the moment.
    _timer.reset();
    _timer.start();

    // Scan all classes that was dirtied during the concurrent marking phase.
!   RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
!   ClassLoaderDataGraph::classes_do(&remark_klass_closure);

    _timer.stop();
!   log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec",
                        worker_id, _timer.seconds());
  }

- // We might have added oops to ClassLoaderData::_handles during the
- // concurrent marking phase. These oops point to newly allocated objects
- // that are guaranteed to be kept alive. Either by the direct allocation
- // code, or when the young collector processes the roots. Hence,
- // we don't have to revisit the _handles block during the remark phase.

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();
--- 4438,4465 ----
    _timer.stop();
    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
                        worker_id, _timer.seconds());
  }

! // We might have added oops to ClassLoaderData::_handles during the
! // concurrent marking phase. These oops do not always point to newly allocated objects
! // that are guaranteed to be kept alive. Hence,
! // we do have to revisit the _handles block during the remark phase.
!
! // ---------- dirty CLD scanning ----------
  if (worker_id == 0) { // Single threaded at the moment.
    _timer.reset();
    _timer.start();

    // Scan all classes that was dirtied during the concurrent marking phase.
!   RemarkCLDClosure remark_closure(&par_mrias_cl);
!   ClassLoaderDataGraph::cld_do(&remark_closure);

    _timer.stop();
!   log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec",
                        worker_id, _timer.seconds());
  }

  // ---------- rescan dirty cards ------------
  _timer.reset();
  _timer.start();
*** 4979,5005 ****
      ClassLoaderDataGraph::remember_new_clds(false);

      verify_work_stacks_empty();
    }

    {
!     GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);

      verify_work_stacks_empty();

!     RemarkKlassClosure remark_klass_closure(&mrias_cl);
!     ClassLoaderDataGraph::classes_do(&remark_klass_closure);

      verify_work_stacks_empty();
    }

-   // We might have added oops to ClassLoaderData::_handles during the
-   // concurrent marking phase. These oops point to newly allocated objects
-   // that are guaranteed to be kept alive. Either by the direct allocation
-   // code, or when the young collector processes the roots. Hence,
-   // we don't have to revisit the _handles block during the remark phase.
-
    verify_work_stacks_empty();
    // Restore evacuated mark words, if any, used for overflow list links
    restore_preserved_marks_if_any();

    verify_overflow_empty();
--- 4980,5004 ----
      ClassLoaderDataGraph::remember_new_clds(false);

      verify_work_stacks_empty();
    }

+   // We might have added oops to ClassLoaderData::_handles during the
+   // concurrent marking phase. These oops do not point to newly allocated objects
+   // that are guaranteed to be kept alive. Hence,
+   // we do have to revisit the _handles block during the remark phase.
    {
!     GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);

      verify_work_stacks_empty();

!     RemarkCLDClosure remark_closure(&mrias_cl);
!     ClassLoaderDataGraph::cld_do(&remark_closure);

      verify_work_stacks_empty();
    }

    verify_work_stacks_empty();
    // Restore evacuated mark words, if any, used for overflow list links
    restore_preserved_marks_if_any();

    verify_overflow_empty();