--- old/src/hotspot/share/gc/g1/g1CollectorState.hpp 2018-03-27 15:50:42.727590684 +0200
+++ new/src/hotspot/share/gc/g1/g1CollectorState.hpp 2018-03-27 15:50:42.473582850 +0200
@@ -64,6 +64,10 @@
   // of the initial mark pause to the end of the Cleanup pause.
   bool _mark_or_rebuild_in_progress;
 
+  // The next bitmap is currently being cleared or about to be cleared. TAMS and bitmap
+  // may be out of sync.
+  bool _clearing_next_bitmap;
+
   // Set during a full gc pause.
   bool _in_full_gc;
@@ -76,6 +80,7 @@
     _initiate_conc_mark_if_possible(false),
     _mark_or_rebuild_in_progress(false),
+    _clearing_next_bitmap(false),
     _in_full_gc(false) { }
 
   // Phase setters
@@ -89,6 +94,7 @@
   void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
   void set_mark_or_rebuild_in_progress(bool v) { _mark_or_rebuild_in_progress = v; }
+  void set_clearing_next_bitmap(bool v) { _clearing_next_bitmap = v; }
 
   // Phase getters
   bool in_young_only_phase() const { return _in_young_only_phase && !_in_full_gc; }
@@ -102,6 +108,7 @@
   bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; }
   bool mark_or_rebuild_in_progress() const { return _mark_or_rebuild_in_progress; }
+  bool clearing_next_bitmap() const { return _clearing_next_bitmap; }
 
   G1YCType yc_type() const {
     if (in_initial_mark_gc()) {
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-03-27 15:50:46.411704302 +0200
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2018-03-27 15:50:46.135695790 +0200
@@ -30,7 +30,6 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
@@ -492,6 +491,8 @@
 }
 
 void G1ConcurrentMark::reset() {
+  _has_aborted = false;
+
   reset_marking_for_restart();
 
   // Reset all tasks, since different phases will use different number of active
@@ -519,15 +520,7 @@
   _region_mark_stats[region_idx].clear();
 }
 
-void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
-  assert_at_safepoint_on_vm_thread();
-
-  // Need to clear mark bit of the humongous object if already set and during a marking cycle.
-  if (_next_mark_bitmap->is_marked(r->bottom())) {
-    _next_mark_bitmap->clear(r->bottom());
-  }
-
-  // Clear any statistics about the region gathered so far.
+void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
   uint const region_idx = r->hrm_index();
   if (r->is_humongous()) {
     assert(r->is_starts_humongous(), "Got humongous continues region here");
@@ -537,7 +530,24 @@
     }
   } else {
     clear_statistics_in_region(region_idx);
+  }
+}
+
+void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+  assert_at_safepoint_on_vm_thread();
+
+  G1CMBitMap* const bitmap = _g1h->collector_state()->mark_or_rebuild_in_progress() ? _next_mark_bitmap : _prev_mark_bitmap;
+  // Need to clear mark bit of the humongous object if already set and during a marking cycle.
+  if (bitmap->is_marked(r->bottom())) {
+    bitmap->clear(r->bottom());
   }
+
+  if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
+    return;
+  }
+
+  // Clear any statistics about the region gathered so far.
+  clear_statistics(r);
 }
 
 void G1ConcurrentMark::reset_marking_for_restart() {
@@ -745,8 +755,6 @@
 };
 
 void G1ConcurrentMark::pre_initial_mark() {
-  _has_aborted = false;
-
   // Initialize marking structures. This has to be done in a STW phase.
   reset();
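Note on the humongous eager-reclaim hunk above: outside of a marking/rebuild phase a reclaimed humongous object can only carry a stale mark in the prev bitmap, and the per-region marking statistics do not exist. A minimal, compilable sketch of that invariant; MockBitmap, MockCollectorState and eagerly_reclaim are illustrative stand-ins, not HotSpot types:

#include <cassert>

// Stand-in for G1CMBitMap: one bit for the humongous object's bottom().
struct MockBitmap {
  bool marked = false;
  bool is_marked() const { return marked; }
  void clear()           { marked = false; }
};

// Stand-in for the relevant part of G1CollectorState.
struct MockCollectorState {
  bool mark_or_rebuild_in_progress = false;
};

// Mirrors the patched humongous_object_eagerly_reclaimed(): pick the bitmap
// that can actually hold a mark for this object, clear it, and only touch
// marking statistics while a marking/rebuild phase is in progress.
void eagerly_reclaim(MockCollectorState& state, MockBitmap& prev, MockBitmap& next) {
  MockBitmap& bitmap = state.mark_or_rebuild_in_progress ? next : prev;
  if (bitmap.is_marked()) {
    bitmap.clear();
  }
  if (!state.mark_or_rebuild_in_progress) {
    return; // no statistics to clear outside of marking
  }
  // clear_statistics(r) would run here.
}

int main() {
  MockCollectorState state;
  MockBitmap prev, next;

  // Outside a cycle: a stale prev-bitmap mark must be cleared.
  prev.marked = true;
  eagerly_reclaim(state, prev, next);
  assert(!prev.is_marked());

  // During a cycle: the next bitmap holds the current mark.
  state.mark_or_rebuild_in_progress = true;
  next.marked = true;
  eagerly_reclaim(state, prev, next);
  assert(!next.is_marked());
  return 0;
}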
@@ -953,6 +961,8 @@
 }
 
 void G1ConcurrentMark::concurrent_cycle_end() {
+  _g1h->collector_state()->set_clearing_next_bitmap(false);
+
   _g1h->trace_heap_after_gc(_gc_tracer_cm);
 
   if (has_aborted()) {
@@ -986,6 +996,24 @@
   print_stats();
 }
 
+void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
+  G1HeapVerifier* verifier = _g1h->verifier();
+
+  verifier->verify_region_sets_optional();
+
+  if (VerifyDuringGC) {
+    GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
+
+    size_t const BufLen = 512;
+    char buffer[BufLen];
+
+    os::snprintf(buffer, BufLen, "During GC (%s)", caller);
+    verifier->verify(type, vo, buffer);
+  }
+
+  verifier->check_bitmaps(caller);
+}
+
 class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
@@ -1035,35 +1063,24 @@
     return;
   }
 
-  if (VerifyDuringGC) {
-    _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark before)");
-  }
-  _g1h->verifier()->check_bitmaps("Remark Start");
-
   G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
 
-  finalize_marking();
-
-  double mark_work_end = os::elapsedTime();
+  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
 
-  weak_refs_work(false /* clear_all_soft_refs */);
+  {
+    GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
+    finalize_marking();
+  }
 
-  if (has_overflown()) {
-    // We overflowed. Restart concurrent marking.
-    _restart_for_overflow = true;
+  double mark_work_end = os::elapsedTime();
 
-    // Verify the heap w.r.t. the previous marking bitmap.
-    if (VerifyDuringGC) {
-      _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark overflow)");
-    }
+  bool const mark_finished = !has_overflown();
+  if (mark_finished) {
+    weak_refs_work(false /* clear_all_soft_refs */);
 
-    // Clear the marking state because we will be restarting
-    // marking due to overflowing the global mark stack.
-    reset_marking_for_restart();
-  } else {
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
     // This is the end of the marking cycle, we're expected all
@@ -1084,13 +1101,25 @@
                                       _g1h->num_regions(), cl.num_selected_for_rebuild());
     }
 
-    if (VerifyDuringGC) {
-      _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (Remark after)");
-    }
-    _g1h->verifier()->check_bitmaps("Remark End");
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
+
     assert(!restart_for_overflow(), "sanity");
     // Completely reset the marking state since marking completed
     reset_at_marking_complete();
+  } else {
+    // We overflowed. Restart concurrent marking.
+    _restart_for_overflow = true;
+
+    verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
+
+    // Clear the marking state because we will be restarting
+    // marking due to overflowing the global mark stack.
+    reset_marking_for_restart();
+  }
+
+  {
+    GCTraceTime(Debug, gc, phases) trace("Report Object Count", _gc_timer_cm);
+    report_object_count();
+  }
 
   // Statistics
@@ -1100,9 +1129,6 @@
   _remark_times.add((now - start) * 1000.0);
 
   g1p->record_concurrent_mark_remark_end();
-
-  G1CMIsAliveClosure is_alive(_g1h);
-  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
 }
 
 class G1CleanupTask : public AbstractGangTask {
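The remark() rework above replaces the early overflow branch with a single mark_finished decision: weak reference processing now runs only when marking finished (so weak_refs_work() loses its own overflow early-return, see the hunk further down), while object counting runs on both paths, now under its own timer. A compilable control-flow sketch; every identifier below is a stub for illustration, not the real G1 interface:

#include <cstdio>

static bool overflown = false;            // stand-in for has_overflown()

static void verify(const char* when)      { std::printf("verify: %s\n", when); }
static void finalize_marking()            {}
static void weak_refs_work()              {}
static void reset_at_marking_complete()   {}
static void reset_marking_for_restart()   {}
static void report_object_count()         {}

void remark() {
  verify("Remark before");
  finalize_marking();                     // may overflow the global mark stack

  bool const mark_finished = !overflown;
  if (mark_finished) {
    weak_refs_work();                     // only reached when marking finished
    verify("Remark after");               // next marking is consistent here
    reset_at_marking_complete();
  } else {
    // Overflow: restart concurrent marking; only prev marking is usable.
    verify("Remark overflow");
    reset_marking_for_restart();
  }
  report_object_count();                  // both paths, as before the patch
}

int main() {
  remark();                               // normal completion
  overflown = true;
  remark();                               // overflow path
  return 0;
}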
@@ -1144,6 +1170,8 @@
         _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
       }
       hr->clear_cardtable();
+      _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
+      log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
     } else {
       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
     }
@@ -1197,6 +1225,7 @@
   workers->run_task(&cl);
 
   if (!empty_regions_list.is_empty()) {
+    log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
     // Now print the empty regions list.
     G1HRPrinter* hrp = _g1h->hr_printer();
     if (hrp->is_active()) {
@@ -1219,28 +1248,19 @@
     return;
   }
 
-  _g1h->verifier()->verify_region_sets_optional();
-
-  if (VerifyDuringGC) { // While rebuilding the remembered set we used the next marking...
-    _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "During GC (Cleanup before)");
-  }
-  _g1h->verifier()->check_bitmaps("Cleanup Start");
-
   G1Policy* g1p = _g1h->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
 
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
+
   {
-    GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
+    GCTraceTime(Debug, gc, phases) trace("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
     _g1h->heap_region_iterate(&cl);
   }
 
-  double count_end = os::elapsedTime();
-  double this_final_counting_time = (count_end - start);
-  _total_cleanup_time += this_final_counting_time;
-
   if (log_is_enabled(Trace, gc, liveness)) {
     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
     _g1h->heap_region_iterate(&cl);
@@ -1253,33 +1273,13 @@
     reclaim_empty_regions();
   }
 
-  {
-    GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
-    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
-  }
-
-  // Statistics.
-  double end = os::elapsedTime();
-  _cleanup_times.add((end - start) * 1000.0);
-
   // Cleanup will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
 
-  if (VerifyDuringGC) {
-    _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (Cleanup after)");
-  }
-
-  _g1h->verifier()->check_bitmaps("Cleanup End");
-
-  _g1h->verifier()->verify_region_sets_optional();
-
-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  _g1h->increment_total_collections();
-
   // Clean out dead classes and update Metaspace sizes.
   if (ClassUnloadingWithConcurrentMark) {
+    GCTraceTime(Debug, gc, phases) trace("Purge Metaspace", _gc_timer_cm);
     ClassLoaderDataGraph::purge();
   }
   MetaspaceGC::compute_new_size();
@@ -1287,6 +1287,22 @@
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   _g1h->g1mm()->update_sizes();
+
+  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for Cleanup to finish.
+  _g1h->increment_total_collections();
+
+  // Local statistics
+  double recent_cleanup_time = (os::elapsedTime() - start);
+  _total_cleanup_time += recent_cleanup_time;
+  _cleanup_times.add(recent_cleanup_time);
+
+  {
+    GCTraceTime(Debug, gc, phases) trace("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
+    _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
+  }
 }
 
 // Supporting Object and Oop closures for reference discovery
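One subtle effect of the reordering above: the single elapsed-time delta taken at the very end of cleanup() now covers remembered-set-tracking update, region reclaim, verification and metaspace work, where the old code only measured the counting portion. A trivial stand-alone sketch of that bookkeeping, with std::chrono standing in for os::elapsedTime() (the accumulators are locals here, fields in the real code):

#include <chrono>
#include <cstdio>

// Stand-in for os::elapsedTime(): seconds since first use.
static double elapsed_seconds() {
  using namespace std::chrono;
  static const auto t0 = steady_clock::now();
  return duration<double>(steady_clock::now() - t0).count();
}

int main() {
  double total_cleanup_time = 0.0;   // accumulator, _total_cleanup_time in the patch

  double start = elapsed_seconds();

  // ... update remset tracking, reclaim empty regions, verify, purge ...

  // As in the patch: one delta feeds both the total and the per-pause stats.
  double recent_cleanup_time = elapsed_seconds() - start;
  total_cleanup_time += recent_cleanup_time;
  std::printf("cleanup took %.6fs (total %.6fs)\n",
              recent_cleanup_time, total_cleanup_time);
  return 0;
}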
@@ -1501,16 +1517,6 @@
 }
 
 void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
-  if (has_overflown()) {
-    // Skip processing the discovered references if we have
-    // overflown the global marking stack. Reference objects
-    // only get discovered once so it is OK to not
-    // de-populate the discovered reference lists. We could have,
-    // but the only benefit would be that, when marking restarts,
-    // less reference objects are discovered.
-    return;
-  }
-
   ResourceMark rm;
   HandleMark hm;
@@ -1629,10 +1635,16 @@
   }
 }
 
+void G1ConcurrentMark::report_object_count() {
+  G1CMIsAliveClosure is_alive(_g1h);
+  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+}
+
 void G1ConcurrentMark::swap_mark_bitmaps() {
   G1CMBitMap* temp = _prev_mark_bitmap;
   _prev_mark_bitmap = _next_mark_bitmap;
   _next_mark_bitmap = temp;
+  _g1h->collector_state()->set_clearing_next_bitmap(true);
 }
 
 // Closure for marking entries in SATB buffers.
@@ -1731,8 +1743,6 @@
   ResourceMark rm;
   HandleMark hm;
 
-  GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
-
   _g1h->ensure_parsability(false);
 
   // this is remark, so we'll use up all active threads
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2018-03-27 15:50:47.559739708 +0200
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2018-03-27 15:50:47.302731782 +0200
@@ -27,6 +27,7 @@
 
 #include "gc/g1/g1ConcurrentMarkBitMap.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
+#include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/taskqueue.hpp"
@@ -365,15 +366,23 @@
   uint _num_concurrent_workers; // The number of marking worker threads we're using
   uint _max_concurrent_workers; // Maximum number of marking worker threads
 
+  void verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller);
+
   void finalize_marking();
 
   void weak_refs_work_parallel_part(BoolObjectClosure* is_alive, bool purged_classes);
   void weak_refs_work(bool clear_all_soft_refs);
 
+  void report_object_count();
+
   void swap_mark_bitmaps();
 
   void reclaim_empty_regions();
 
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics(HeapRegion* r);
+
   // Resets the global marking data structures, as well as the
   // task local ones; should be called during initial mark.
   void reset();
@@ -447,9 +456,6 @@
   // true, periodically insert checks to see if this method should exit prematurely.
   void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
 
-  // Clear statistics gathered during the concurrent cycle for the given region after
-  // it has been reclaimed.
-  void clear_statistics_in_region(uint region_idx);
-
   // Region statistics gathered during marking.
   G1RegionMarkStats* _region_mark_stats;
   // Top pointer for each region at the start of the rebuild remembered set process
@@ -468,6 +474,9 @@
   // TARS for the given region during remembered set rebuilding.
   inline HeapWord* top_at_rebuild_start(uint region) const;
 
+  // Clear statistics gathered during the concurrent cycle for the given region after
+  // it has been reclaimed.
+  void clear_statistics_in_region(uint region_idx);
   // Notification for eagerly reclaimed regions to clean up.
   void humongous_object_eagerly_reclaimed(HeapRegion* r);
   // Manipulation of the global mark stack.
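The set_clearing_next_bitmap(true) in swap_mark_bitmaps() above is the core of the patch: the flag is raised exactly when the freshly swapped next bitmap becomes stale relative to nTAMS, and lowered once concurrent_cycle_end() (or a Full GC) has made them consistent again. A compilable lifecycle sketch; CollectorState and ConcurrentMark here are mock types, not the HotSpot classes:

#include <cassert>

struct CollectorState {
  bool clearing_next_bitmap = false;
};

struct ConcurrentMark {
  CollectorState* state;
  int* prev_bitmap;   // stand-ins for the two G1CMBitMap*
  int* next_bitmap;

  void swap_mark_bitmaps() {
    int* temp = prev_bitmap;
    prev_bitmap = next_bitmap;
    next_bitmap = temp;
    state->clearing_next_bitmap = true;   // bitmap and nTAMS now out of sync
  }

  void concurrent_cycle_end() {
    state->clearing_next_bitmap = false;  // next bitmap has been cleared
  }

  // Mirrors the verifier's new guard (see the g1HeapVerifier.cpp hunk below).
  bool may_verify_next_bitmap() const {
    return !state->clearing_next_bitmap;
  }
};

int main() {
  CollectorState state;
  int a = 0, b = 0;
  ConcurrentMark cm{&state, &a, &b};

  assert(cm.may_verify_next_bitmap());
  cm.swap_mark_bitmaps();
  assert(!cm.may_verify_next_bitmap());   // verification must skip this window
  cm.concurrent_cycle_end();
  assert(cm.may_verify_next_bitmap());
  return 0;
}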
--- old/src/hotspot/share/gc/g1/g1HeapVerifier.cpp 2018-03-27 15:50:57.309040377 +0200
+++ new/src/hotspot/share/gc/g1/g1HeapVerifier.cpp 2018-03-27 15:50:57.041032112 +0200
@@ -651,10 +651,8 @@
   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
 
   bool res_n = true;
-  // We reset mark_or_rebuild_in_progress() before we reset _cmThread->in_progress() and in this window
-  // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
-  // if we happen to be in that state.
-  if (_g1h->collector_state()->mark_or_rebuild_in_progress() || !_g1h->_cmThread->in_progress()) {
+  // We cannot verify the next bitmap while we are about to clear it.
+  if (!_g1h->collector_state()->clearing_next_bitmap()) {
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
@@ -666,7 +664,9 @@
 }
 
 void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
 }
@@ -693,7 +693,9 @@
 };
 
 void G1HeapVerifier::check_bitmaps(const char* caller) {
-  if (!G1VerifyBitmaps) return;
+  if (!G1VerifyBitmaps) {
+    return;
+  }
 
   G1VerifyBitmapClosure cl(caller, this);
 
   _g1h->heap_region_iterate(&cl);
--- old/src/hotspot/share/gc/g1/g1Policy.cpp 2018-03-27 15:51:14.863581761 +0200
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2018-03-27 15:51:14.602573711 +0200
@@ -439,6 +439,7 @@
   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
   collector_state()->set_in_initial_mark_gc(false);
   collector_state()->set_mark_or_rebuild_in_progress(false);
+  collector_state()->set_clearing_next_bitmap(false);
 
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
--- old/src/hotspot/share/gc/g1/heapRegion.inline.hpp 2018-03-27 15:51:18.969708394 +0200
+++ new/src/hotspot/share/gc/g1/heapRegion.inline.hpp 2018-03-27 15:51:18.710700406 +0200
@@ -247,6 +247,7 @@
 
 inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
+  _next_top_at_mark_start = bottom();
   _prev_marked_bytes = _next_marked_bytes;
   _next_marked_bytes = 0;
 }
--- old/src/hotspot/share/memory/iterator.hpp 2018-03-27 15:51:20.078742597 +0200
+++ new/src/hotspot/share/memory/iterator.hpp 2018-03-27 15:51:19.818734578 +0200
@@ -133,6 +133,9 @@
     _wrapped_closure->do_oop(p);
   }
   void do_oop(narrowOop* p) { assert(false, "Only the _nv versions should be used"); _wrapped_closure->do_oop(p); }
+#ifdef ASSERT
+  bool should_verify_oops() { return false; }
+#endif
 };
 
 class KlassClosure : public Closure {
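The heapRegion.inline.hpp hunk ties in with the new flag: resetting _next_top_at_mark_start to bottom() at the end of marking encodes "no objects above nTAMS", which is exactly the state an (eventually) clear next bitmap describes, so the verifier's no-bits-over-TAMS check holds again once clearing_next_bitmap is dropped. A stand-alone sketch of that invariant; Region is a mock with plain pointers in place of HeapWord*, not HotSpot's HeapRegion:

#include <cassert>
#include <cstddef>

struct Region {
  char storage[64];     // pretend region payload
  char* prev_tams = nullptr;
  char* next_tams = nullptr;
  size_t prev_marked_bytes = 0;
  size_t next_marked_bytes = 0;

  char* bottom() { return storage; }

  // Mirrors the patched HeapRegion::note_end_of_marking().
  void note_end_of_marking() {
    prev_tams = next_tams;
    next_tams = bottom();   // new: nTAMS == bottom matches an empty next bitmap
    prev_marked_bytes = next_marked_bytes;
    next_marked_bytes = 0;
  }
};

int main() {
  Region r;
  r.prev_tams = r.bottom();
  r.next_tams = r.bottom() + 32;   // marking had advanced nTAMS
  r.next_marked_bytes = 16;

  r.note_end_of_marking();

  assert(r.prev_tams == r.bottom() + 32);  // old next state becomes prev
  assert(r.next_tams == r.bottom());       // nothing is above nTAMS anymore
  assert(r.next_marked_bytes == 0);
  return 0;
}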