
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 49484 : imported patch 8197573-remove-secondary-free-list
rev 49485 : imported patch 8197573-stefanj-review2
rev 49493 : imported patch 8199326-remove-gc-time-stamp-logic-only
rev 49494 : imported patch 8199742-collectorstate-fixes
rev 49496 : imported patch 8151171-renamings
rev 49497 : [mq]: 8200234-g1concurrentmark-refactorings

*** 28,38 ****
  #include "code/codeCache.hpp"
  #include "gc/g1/concurrentMarkThread.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1CollectorState.hpp"
  #include "gc/g1/g1ConcurrentMark.inline.hpp"
- #include "gc/g1/g1HeapVerifier.hpp"
  #include "gc/g1/g1OopClosures.inline.hpp"
  #include "gc/g1/g1Policy.hpp"
  #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  #include "gc/g1/g1StringDedup.hpp"
  #include "gc/g1/heapRegion.inline.hpp"
--- 28,37 ----
*** 490,499 ****
--- 489,500 ----
      reset_at_marking_complete();
      _completed_initialization = true;
  }
  
  void G1ConcurrentMark::reset() {
+   _has_aborted = false;
+   reset_marking_for_restart();
  
    // Reset all tasks, since different phases will use different number of active
    // threads. So, it's easiest to have all of them ready.
    for (uint i = 0; i < _max_num_tasks; ++i) {
*** 517,535 ****
      }
      _top_at_rebuild_starts[region_idx] = NULL;
      _region_mark_stats[region_idx].clear();
  }
  
! void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
!   assert_at_safepoint_on_vm_thread();
! 
!   // Need to clear mark bit of the humongous object if already set and during a marking cycle.
!   if (_next_mark_bitmap->is_marked(r->bottom())) {
!     _next_mark_bitmap->clear(r->bottom());
!   }
! 
!   // Clear any statistics about the region gathered so far.
    uint const region_idx = r->hrm_index();
    if (r->is_humongous()) {
      assert(r->is_starts_humongous(), "Got humongous continues region here");
      uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
      for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
--- 518,528 ----
      }
      _top_at_rebuild_starts[region_idx] = NULL;
      _region_mark_stats[region_idx].clear();
  }
  
! void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
    uint const region_idx = r->hrm_index();
    if (r->is_humongous()) {
      assert(r->is_starts_humongous(), "Got humongous continues region here");
      uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(oop(r->humongous_start_region()->bottom())->size());
      for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
*** 538,547 ****
--- 531,557 ----
    } else {
      clear_statistics_in_region(region_idx);
    }
  }
  
+ void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
+   assert_at_safepoint_on_vm_thread();
+ 
+   G1CMBitMap* const bitmap = _g1h->collector_state()->mark_or_rebuild_in_progress() ? _next_mark_bitmap : _prev_mark_bitmap;
+   // Need to clear mark bit of the humongous object if already set and during a marking cycle.
+   if (bitmap->is_marked(r->bottom())) {
+     bitmap->clear(r->bottom());
+   }
+ 
+   if (!_g1h->collector_state()->mark_or_rebuild_in_progress()) {
+     return;
+   }
+ 
+   // Clear any statistics about the region gathered so far.
+   clear_statistics(r);
+ }
+ 
  void G1ConcurrentMark::reset_marking_for_restart() {
    _global_mark_stack.set_empty();
  
    // Expand the marking stack, if we have to and if we can.
    if (has_overflown()) {
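Aside, a standalone illustration: the new humongous_object_eagerly_reclaimed() above picks which bitmap to clear from the collector state, since during a marking or rebuild phase the object was marked on the next bitmap, while otherwise only the prev bitmap can hold a stale bit. The following minimal C++ sketch shows only that selection pattern; it is not HotSpot code, and MiniBitMap and clear_mark_for_reclaimed are hypothetical names.

#include <bitset>
#include <cstddef>
#include <cstdio>

struct MiniBitMap {
  std::bitset<1024> bits;
  bool is_marked(std::size_t i) const { return bits.test(i); }
  void clear(std::size_t i)           { bits.reset(i); }
};

static MiniBitMap prev_bitmap;  // results of the completed marking
static MiniBitMap next_bitmap;  // marking currently in progress

// Outside a marking/rebuild phase only the prev bitmap can hold the object's
// mark bit; during one, the object was marked on the next bitmap.
void clear_mark_for_reclaimed(std::size_t obj, bool mark_or_rebuild_in_progress) {
  MiniBitMap& bm = mark_or_rebuild_in_progress ? next_bitmap : prev_bitmap;
  if (bm.is_marked(obj)) {
    bm.clear(obj);
  }
}

int main() {
  next_bitmap.bits.set(42);
  clear_mark_for_reclaimed(42, true);  // clears the bit on the next bitmap
  std::printf("still marked: %d\n", (int)next_bitmap.is_marked(42));
}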
*** 743,754 ****
      return false;
    }
  };
  
  void G1ConcurrentMark::pre_initial_mark() {
-   _has_aborted = false;
- 
    // Initialize marking structures. This has to be done in a STW phase.
    reset();
  
    // For each region note start of marking.
    NoteStartOfMarkHRClosure startcl;
--- 753,762 ----
*** 951,960 ****
--- 959,970 ----
    _g1h->trace_heap_before_gc(_gc_tracer_cm);
  }
  
  void G1ConcurrentMark::concurrent_cycle_end() {
+   _g1h->collector_state()->set_clearing_next_bitmap(false);
+ 
    _g1h->trace_heap_after_gc(_gc_tracer_cm);
  
    if (has_aborted()) {
      log_info(gc, marking)("Concurrent Mark Abort");
      _gc_tracer_cm->report_concurrent_mode_failure();
*** 984,993 ****
--- 994,1021 ----
    G1CMConcurrentMarkingTask marking_task(this);
    _concurrent_workers->run_task(&marking_task);
    print_stats();
  }
  
+ void G1ConcurrentMark::verify_during_pause(G1HeapVerifier::G1VerifyType type, VerifyOption vo, const char* caller) {
+   G1HeapVerifier* verifier = _g1h->verifier();
+ 
+   verifier->verify_region_sets_optional();
+ 
+   if (VerifyDuringGC) {
+     GCTraceTime(Debug, gc, phases) trace(caller, _gc_timer_cm);
+ 
+     size_t const BufLen = 512;
+     char buffer[BufLen];
+ 
+     os::snprintf(buffer, BufLen, "During GC (%s)", caller);
+     verifier->verify(type, vo, buffer);
+   }
+ 
+   verifier->check_bitmaps(caller);
+ }
+ 
  class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1ConcurrentMark* _cm;
  
    uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
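The new verify_during_pause() above folds verification boilerplate that was previously repeated at every Remark/Cleanup call site (region-set check, optional heap verify with a formatted label, bitmap check) into one helper parameterized by the varying parts. Below is a minimal standalone sketch of that consolidation, not HotSpot code; verify_heap and check_bitmaps are hypothetical stand-ins for the verifier calls.

#include <cstdio>

static bool VerifyDuringGC = true;  // stand-in for the HotSpot flag

static void verify_heap(const char* label)    { std::printf("verify: %s\n", label); }
static void check_bitmaps(const char* caller) { std::printf("bitmaps: %s\n", caller); }

// One helper replaces several near-identical blocks; callers pass only the
// part that differed between them (here, the caller label).
void verify_during_pause(const char* caller) {
  if (VerifyDuringGC) {
    char buffer[512];
    std::snprintf(buffer, sizeof(buffer), "During GC (%s)", caller);
    verify_heap(buffer);
  }
  check_bitmaps(caller);
}

int main() {
  verify_during_pause("Remark before");
  verify_during_pause("Cleanup after");
}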
*** 1033,1071 ****
    // have ended up here as the Remark VM operation has been scheduled already.
    if (has_aborted()) {
      return;
    }
  
-   if (VerifyDuringGC) {
-     _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark before)");
-   }
-   _g1h->verifier()->check_bitmaps("Remark Start");
- 
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_remark_start();
  
    double start = os::elapsedTime();
  
    finalize_marking();
  
    double mark_work_end = os::elapsedTime();
  
    weak_refs_work(false /* clear_all_soft_refs */);
  
-   if (has_overflown()) {
-     // We overflowed. Restart concurrent marking.
-     _restart_for_overflow = true;
- 
-     // Verify the heap w.r.t. the previous marking bitmap.
-     if (VerifyDuringGC) {
-       _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (Remark overflow)");
-     }
- 
-     // Clear the marking state because we will be restarting
-     // marking due to overflowing the global mark stack.
-     reset_marking_for_restart();
-   } else {
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      // We're done with marking.
      // This is the end of the marking cycle, we're expected all
      // threads to have SATB queues with active set to true.
      satb_mq_set.set_active_all_threads(false, /* new active value */
--- 1061,1088 ----
    // have ended up here as the Remark VM operation has been scheduled already.
    if (has_aborted()) {
      return;
    }
  
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_remark_start();
  
    double start = os::elapsedTime();
  
+   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
+ 
+   {
+     GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
      finalize_marking();
+   }
  
    double mark_work_end = os::elapsedTime();
  
+   bool const mark_finished = !has_overflown();
+   if (mark_finished) {
+     weak_refs_work(false /* clear_all_soft_refs */);
+ 
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      // We're done with marking.
      // This is the end of the marking cycle, we're expected all
      // threads to have SATB queues with active set to true.
      satb_mq_set.set_active_all_threads(false, /* new active value */
*** 1082,1110 ****
      _g1h->heap_region_iterate(&cl);
  
      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
    }
  
!   if (VerifyDuringGC) {
!     _g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "During GC (Remark after)");
!   }
!   _g1h->verifier()->check_bitmaps("Remark End");
  
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
  }
  
  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);
  
  g1p->record_concurrent_mark_remark_end();
- 
-   G1CMIsAliveClosure is_alive(_g1h);
-   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
  }
  
  class G1CleanupTask : public AbstractGangTask {
    // Per-region work during the Cleanup pause.
    class G1CleanupRegionsClosure : public HeapRegionClosure {
--- 1099,1136 ----
      _g1h->heap_region_iterate(&cl);
  
      log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
                                      _g1h->num_regions(), cl.num_selected_for_rebuild());
    }
  
!   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UseNextMarking, "Remark after");
! 
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    reset_at_marking_complete();
+ } else {
+   // We overflowed. Restart concurrent marking.
+   _restart_for_overflow = true;
+ 
+   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark overflow");
+ 
+   // Clear the marking state because we will be restarting
+   // marking due to overflowing the global mark stack.
+   reset_marking_for_restart();
+ }
+ 
+ {
+   GCTraceTime(Debug, gc, phases)("Report Object Count");
+   report_object_count();
  }
  
  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);
  
  g1p->record_concurrent_mark_remark_end();
  }
  
  class G1CleanupTask : public AbstractGangTask {
    // Per-region work during the Cleanup pause.
    class G1CleanupRegionsClosure : public HeapRegionClosure {
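The Remark hunks above invert the old control flow: instead of testing has_overflown() and tucking the normal completion path into an else branch, the rewrite names the condition (mark_finished), handles completion first and restart second, and hoists work common to both outcomes (object-count reporting) out after the branch. A standalone C++ sketch of that shape, with placeholder bodies and hypothetical names, assuming nothing beyond what the diff shows:

#include <cstdio>

static bool has_overflown() { return false; }  // stand-in query

void remark_sketch() {
  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    // Common case first: finish marking, flush SATB queues, reset state.
    std::printf("marking finished\n");
  } else {
    // Overflow case: flag a restart and reset marking state.
    std::printf("restart for overflow\n");
  }
  // Work needed in both outcomes now runs exactly once, after the branch,
  // rather than being duplicated or skipped in one arm.
  std::printf("report object count\n");
}

int main() { remark_sketch(); }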
*** 1142,1151 ****
--- 1168,1179 ----
        } else {
          _old_regions_removed++;
          _g1h->free_region(hr, _local_cleanup_list, false /* skip_remset */, false /* skip_hcc */, true /* locked */);
        }
        hr->clear_cardtable();
+       _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
+       log_trace(gc)("Reclaimed empty region %u (%s) bot " PTR_FORMAT, hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
      } else {
        hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
      }
      return false;
*** 1195,1204 ****
--- 1223,1233 ----
    G1CleanupTask cl(_g1h, &empty_regions_list, workers->active_workers());
    workers->run_task(&cl);
  
    if (!empty_regions_list.is_empty()) {
+     log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length());
      // Now print the empty regions list.
      G1HRPrinter* hrp = _g1h->hr_printer();
      if (hrp->is_active()) {
        FreeRegionListIterator iter(&empty_regions_list);
        while (iter.more_available()) {
*** 1217,1248 ****
    // If a full collection has happened, we shouldn't do this.
    if (has_aborted()) {
      return;
    }
  
-   _g1h->verifier()->verify_region_sets_optional();
- 
-   if (VerifyDuringGC) { // While rebuilding the remembered set we used the next marking...
-     _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "During GC (Cleanup before)");
-   }
-   _g1h->verifier()->check_bitmaps("Cleanup Start");
- 
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_cleanup_start();
  
    double start = os::elapsedTime();
  
    {
      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
      G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
      _g1h->heap_region_iterate(&cl);
    }
  
-   double count_end = os::elapsedTime();
-   double this_final_counting_time = (count_end - start);
-   _total_cleanup_time += this_final_counting_time;
- 
    if (log_is_enabled(Trace, gc, liveness)) {
      G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
      _g1h->heap_region_iterate(&cl);
    }
--- 1246,1268 ----
    // If a full collection has happened, we shouldn't do this.
    if (has_aborted()) {
      return;
    }
  
    G1Policy* g1p = _g1h->g1_policy();
    g1p->record_concurrent_mark_cleanup_start();
  
    double start = os::elapsedTime();
  
+   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UseNextMarking, "Cleanup before");
+ 
    {
      GCTraceTime(Debug, gc, phases)("Update Remembered Set Tracking After Rebuild");
      G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
      _g1h->heap_region_iterate(&cl);
    }
  
    if (log_is_enabled(Trace, gc, liveness)) {
      G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
      _g1h->heap_region_iterate(&cl);
    }
*** 1251,1294 ****
    {
      GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
      reclaim_empty_regions();
    }
  
-   {
-     GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
-     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
-   }
- 
-   // Statistics.
-   double end = os::elapsedTime();
-   _cleanup_times.add((end - start) * 1000.0);
- 
    // Cleanup will have freed any regions completely full of garbage.
    // Update the soft reference policy with the new heap occupancy.
    Universe::update_heap_info_at_gc();
  
-   if (VerifyDuringGC) {
-     _g1h->verifier()->verify(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "During GC (Cleanup after)");
-   }
- 
-   _g1h->verifier()->check_bitmaps("Cleanup End");
- 
-   _g1h->verifier()->verify_region_sets_optional();
- 
-   // We need to make this be a "collection" so any collection pause that
-   // races with it goes around and waits for completeCleanup to finish.
-   _g1h->increment_total_collections();
- 
    // Clean out dead classes and update Metaspace sizes.
    if (ClassUnloadingWithConcurrentMark) {
      ClassLoaderDataGraph::purge();
    }
    MetaspaceGC::compute_new_size();
  
    // We reclaimed old regions so we should calculate the sizes to make
    // sure we update the old gen/space data.
    _g1h->g1mm()->update_sizes();
  }
--- 1271,1310 ----
    {
      GCTraceTime(Debug, gc, phases)("Reclaim Empty Regions");
      reclaim_empty_regions();
    }
  
    // Cleanup will have freed any regions completely full of garbage.
    // Update the soft reference policy with the new heap occupancy.
    Universe::update_heap_info_at_gc();
  
    // Clean out dead classes and update Metaspace sizes.
    if (ClassUnloadingWithConcurrentMark) {
+     GCTraceTime(Debug, gc, phases)("Purge Metaspace");
      ClassLoaderDataGraph::purge();
    }
    MetaspaceGC::compute_new_size();
  
    // We reclaimed old regions so we should calculate the sizes to make
    // sure we update the old gen/space data.
    _g1h->g1mm()->update_sizes();
+ 
+   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
+ 
+   // We need to make this be a "collection" so any collection pause that
+   // races with it goes around and waits for Cleanup to finish.
+   _g1h->increment_total_collections();
+ 
+   // Local statistics
+   double recent_cleanup_time = (os::elapsedTime() - start);
+   _total_cleanup_time += recent_cleanup_time;
+   _cleanup_times.add(recent_cleanup_time);
+ 
+   {
+     GCTraceTime(Debug, gc, phases)("Finalize Concurrent Mark Cleanup");
+     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
+   }
  }
  
  // Supporting Object and Oop closures for reference discovery
  // and processing in during marking
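Several hunks in this change wrap newly timed phases (e.g. "Purge Metaspace", "Finalize Marking") in braces containing a GCTraceTime object, relying on constructor/destructor timing so that the block alone delimits what gets measured. A minimal standalone sketch of that RAII idiom follows; it is not the GCTraceTime implementation, and ScopedTimer is a hypothetical miniature.

#include <chrono>
#include <cstdio>

class ScopedTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedTimer(const char* name)
    : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    // Runs when the enclosing scope ends, logging the elapsed time.
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - _start).count();
    std::printf("%s: %.3fms\n", _name, ms);
  }
};

int main() {
  {
    ScopedTimer t("Purge Metaspace");  // mirrors a GCTraceTime scope
    // ... work being timed ...
  }  // timer destructor fires here
}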
*** 1499,1518 ****
    _cm->set_concurrency(_active_workers);
  
    _workers->run_task(&enq_task_proxy);
  }
  
  void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
-   if (has_overflown()) {
-     // Skip processing the discovered references if we have
-     // overflown the global marking stack. Reference objects
-     // only get discovered once so it is OK to not
-     // de-populate the discovered reference lists. We could have,
-     // but the only benefit would be that, when marking restarts,
-     // less reference objects are discovered.
-     return;
-   }
- 
    ResourceMark rm;
    HandleMark hm;
  
    // Is alive closure.
    G1CMIsAliveClosure g1_is_alive(_g1h);
--- 1515,1524 ----
*** 1627,1640 ****
--- 1633,1652 ----
      // class unloading is disabled.
      _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
    }
  }
  
+ void G1ConcurrentMark::report_object_count() {
+   G1CMIsAliveClosure is_alive(_g1h);
+   _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+ }
+ 
  void G1ConcurrentMark::swap_mark_bitmaps() {
    G1CMBitMap* temp = _prev_mark_bitmap;
    _prev_mark_bitmap = _next_mark_bitmap;
    _next_mark_bitmap = temp;
+   _g1h->collector_state()->set_clearing_next_bitmap(true);
  }
  
  // Closure for marking entries in SATB buffers.
  class G1CMSATBBufferClosure : public SATBBufferClosure {
  private:
*** 1729,1740 ****
  void G1ConcurrentMark::finalize_marking() {
    ResourceMark rm;
    HandleMark hm;
  
-   GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
- 
    _g1h->ensure_parsability(false);
  
    // this is remark, so we'll use up all active threads
    uint active_workers = _g1h->workers()->active_workers();
    set_concurrency_and_phase(active_workers, false /* concurrent */);
--- 1741,1750 ----