--- old/src/share/vm/gc/cms/vmCMSOperations.cpp 2015-09-09 16:12:50.383861878 +0200 +++ new/src/share/vm/gc/cms/vmCMSOperations.cpp 2015-09-09 16:12:50.271861882 +0200 @@ -134,7 +134,6 @@ return; } HS_PRIVATE_CMS_INITMARK_BEGIN(); - GCIdMark gc_id_mark(_gc_id); _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark"); @@ -162,7 +161,6 @@ return; } HS_PRIVATE_CMS_REMARK_BEGIN(); - GCIdMark gc_id_mark(_gc_id); _collector->_gc_timer_cm->register_gc_pause_start("Final Mark"); --- old/src/share/vm/gc/cms/vmCMSOperations.hpp 2015-09-09 16:12:50.591861871 +0200 +++ new/src/share/vm/gc/cms/vmCMSOperations.hpp 2015-09-09 16:12:50.479861875 +0200 @@ -27,7 +27,6 @@ #include "gc/cms/concurrentMarkSweepGeneration.hpp" #include "gc/shared/gcCause.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/vmGCOperations.hpp" #include "runtime/vm_operations.hpp" @@ -54,7 +53,6 @@ protected: CMSCollector* _collector; // associated collector bool _prologue_succeeded; // whether doit_prologue succeeded - uint _gc_id; bool lost_race() const; @@ -65,8 +63,7 @@ public: VM_CMS_Operation(CMSCollector* collector): _collector(collector), - _prologue_succeeded(false), - _gc_id(GCId::current()) {} + _prologue_succeeded(false) {} ~VM_CMS_Operation() {} // The legal collector state for executing this CMS op. --- old/src/share/vm/gc/cms/yieldingWorkgroup.cpp 2015-09-09 16:12:50.803861863 +0200 +++ new/src/share/vm/gc/cms/yieldingWorkgroup.cpp 2015-09-09 16:12:50.691861867 +0200 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "gc/cms/yieldingWorkgroup.hpp" -#include "gc/shared/gcId.hpp" #include "utilities/macros.hpp" YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id) @@ -341,7 +340,6 @@ // Now, release the gang mutex and do the work. 
{ MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag); - GCIdMark gc_id_mark(data.task()->gc_id()); data.task()->work(id); // This might include yielding } // Reacquire monitor and note completion of this worker --- old/src/share/vm/gc/g1/concurrentMark.cpp 2015-09-09 16:12:51.019861856 +0200 +++ new/src/share/vm/gc/g1/concurrentMark.cpp 2015-09-09 16:12:50.903861860 +0200 @@ -41,7 +41,6 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionSet.inline.hpp" #include "gc/g1/suspendibleThreadSet.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" --- old/src/share/vm/gc/g1/concurrentMark.hpp 2015-09-09 16:12:51.259861848 +0200 +++ new/src/share/vm/gc/g1/concurrentMark.hpp 2015-09-09 16:12:51.147861851 +0200 @@ -28,7 +28,6 @@ #include "classfile/javaClasses.hpp" #include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/g1/heapRegionSet.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/taskqueue.hpp" class G1CollectedHeap; --- old/src/share/vm/gc/g1/concurrentMarkThread.cpp 2015-09-09 16:12:51.475861840 +0200 +++ new/src/share/vm/gc/g1/concurrentMarkThread.cpp 2015-09-09 16:12:51.359861844 +0200 @@ -110,6 +110,7 @@ } GCIdMark gc_id_mark; + { ResourceMark rm; HandleMark hm; --- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-09-09 16:12:51.671861833 +0200 +++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-09-09 16:12:51.559861837 +0200 @@ -3992,360 +3992,364 @@ _gc_timer_stw->register_gc_start(); - GCIdMark gc_id_mark; - _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); SvcGCMarker sgcm(SvcGCMarker::MINOR); ResourceMark rm; wait_for_root_region_scanning(); - G1Log::update_level(); - print_heap_before_gc(); - trace_heap_before_gc(_gc_tracer_stw); - - verify_region_sets_optional(); - verify_dirty_young_regions(); - - // This call will decide whether this pause is an initial-mark - // pause. If it is, during_initial_mark_pause() will return true - // for the duration of this pause. - g1_policy()->decide_on_conc_mark_initiation(); - - // We do not allow initial-mark to be piggy-backed on a mixed GC. - assert(!collector_state()->during_initial_mark_pause() || - collector_state()->gcs_are_young(), "sanity"); - - // We also do not allow mixed GCs during marking. - assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity"); - - // Record whether this pause is an initial mark. When the current - // thread has completed its logging output and it's safe to signal - // the CM thread, the flag's value in the policy has been reset. - bool should_start_conc_mark = collector_state()->during_initial_mark_pause(); - - // Inner scope for scope based logging, timers, and stats collection + bool should_start_conc_mark = false; { - EvacuationInfo evacuation_info; + GCIdMark gc_id_mark; + _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start()); - if (collector_state()->during_initial_mark_pause()) { - // We are about to start a marking cycle, so we increment the - // full collection counter.
- increment_old_marking_cycles_started(); - register_concurrent_cycle_start(_gc_timer_stw->gc_start()); - } + G1Log::update_level(); + print_heap_before_gc(); + trace_heap_before_gc(_gc_tracer_stw); - _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); + verify_region_sets_optional(); + verify_dirty_young_regions(); - TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + // This call will decide whether this pause is an initial-mark + // pause. If it is, during_initial_mark_pause() will return true + // for the duration of this pause. + g1_policy()->decide_on_conc_mark_initiation(); + + // We do not allow initial-mark to be piggy-backed on a mixed GC. + assert(!collector_state()->during_initial_mark_pause() || + collector_state()->gcs_are_young(), "sanity"); + + // We also do not allow mixed GCs during marking. + assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity"); + + // Record whether this pause is an initial mark. When the current + // thread has completed its logging output and it's safe to signal + // the CM thread, the flag's value in the policy has been reset. + should_start_conc_mark = collector_state()->during_initial_mark_pause(); + + // Inner scope for scope based logging, timers, and stats collection + { + EvacuationInfo evacuation_info; + + if (collector_state()->during_initial_mark_pause()) { + // We are about to start a marking cycle, so we increment the + // full collection counter. + increment_old_marking_cycles_started(); + register_concurrent_cycle_start(_gc_timer_stw->gc_start()); + } - uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), - workers()->active_workers(), - Threads::number_of_non_daemon_threads()); - workers()->set_active_workers(active_workers); - - double pause_start_sec = os::elapsedTime(); - g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress()); - log_gc_header(); - - TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); - TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); - - // If the secondary_free_list is not empty, append it to the - // free_list. No need to wait for the cleanup operation to finish; - // the region allocation code will check the secondary_free_list - // and wait if necessary. If the G1StressConcRegionFreeing flag is - // set, skip this step so that the region allocation code has to - // get entries from the secondary_free_list. - if (!G1StressConcRegionFreeing) { - append_secondary_free_list_if_not_empty_with_lock(); - } + _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); - assert(check_young_list_well_formed(), "young list should be well formed"); + TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - // Don't dynamically change the number of GC threads this early. A value of - // 0 is used to indicate serial work. When parallel work is done, - // it will be set. - - { // Call to jvmpi::post_class_unload_events must occur outside of active GC - IsGCActiveMark x; - - gc_prologue(false); - increment_total_collections(false /* full gc */); - increment_gc_time_stamp(); - - verify_before_gc(); - - check_bitmaps("GC Start"); - - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - // Please see comment in g1CollectedHeap.hpp and - // G1CollectedHeap::ref_processing_init() to see how - // reference processing currently works in G1. 
- - // Enable discovery in the STW reference processor - ref_processor_stw()->enable_discovery(); - - { - // We want to temporarily turn off discovery by the - // CM ref processor, if necessary, and turn it back on - // on again later if we do. Using a scoped - // NoRefDiscovery object will do this. - NoRefDiscovery no_cm_discovery(ref_processor_cm()); - - // Forget the current alloc region (we might even choose it to be part - // of the collection set!). - _allocator->release_mutator_alloc_region(); - - // We should call this after we retire the mutator alloc - // region(s) so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(false /* full */, (size_t) total_collections()); - - // This timing is only used by the ergonomics to handle our pause target. - // It is unclear why this should not include the full pause. We will - // investigate this in CR 7178365. - // - // Preserving the old comment here if that helps the investigation: - // - // The elapsed time induced by the start time below deliberately elides - // the possible verification above. - double sample_start_time_sec = os::elapsedTime(); + uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), + workers()->active_workers(), + Threads::number_of_non_daemon_threads()); + workers()->set_active_workers(active_workers); + + double pause_start_sec = os::elapsedTime(); + g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress()); + log_gc_header(); + + TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); + TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); + + // If the secondary_free_list is not empty, append it to the + // free_list. No need to wait for the cleanup operation to finish; + // the region allocation code will check the secondary_free_list + // and wait if necessary. If the G1StressConcRegionFreeing flag is + // set, skip this step so that the region allocation code has to + // get entries from the secondary_free_list. + if (!G1StressConcRegionFreeing) { + append_secondary_free_list_if_not_empty_with_lock(); + } + + assert(check_young_list_well_formed(), "young list should be well formed"); + + // Don't dynamically change the number of GC threads this early. A value of + // 0 is used to indicate serial work. When parallel work is done, + // it will be set. + + { // Call to jvmpi::post_class_unload_events must occur outside of active GC + IsGCActiveMark x; + + gc_prologue(false); + increment_total_collections(false /* full gc */); + increment_gc_time_stamp(); + + verify_before_gc(); + + check_bitmaps("GC Start"); + + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // Please see comment in g1CollectedHeap.hpp and + // G1CollectedHeap::ref_processing_init() to see how + // reference processing currently works in G1. + + // Enable discovery in the STW reference processor + ref_processor_stw()->enable_discovery(); + + { + // We want to temporarily turn off discovery by the + // CM ref processor, if necessary, and turn it back on + // on again later if we do. Using a scoped + // NoRefDiscovery object will do this. + NoRefDiscovery no_cm_discovery(ref_processor_cm()); + + // Forget the current alloc region (we might even choose it to be part + // of the collection set!). + _allocator->release_mutator_alloc_region(); + + // We should call this after we retire the mutator alloc + // region(s) so that all the ALLOC / RETIRE events are generated + // before the start GC event. 
+ _hr_printer.start_gc(false /* full */, (size_t) total_collections()); + + // This timing is only used by the ergonomics to handle our pause target. + // It is unclear why this should not include the full pause. We will + // investigate this in CR 7178365. + // + // Preserving the old comment here if that helps the investigation: + // + // The elapsed time induced by the start time below deliberately elides + // the possible verification above. + double sample_start_time_sec = os::elapsedTime(); #if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); + gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_collection_pause_start(sample_start_time_sec); + g1_policy()->record_collection_pause_start(sample_start_time_sec); #if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); - _young_list->print(); + gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); + _young_list->print(); #endif // YOUNG_LIST_VERBOSE - if (collector_state()->during_initial_mark_pause()) { - concurrent_mark()->checkpointRootsInitialPre(); - } + if (collector_state()->during_initial_mark_pause()) { + concurrent_mark()->checkpointRootsInitialPre(); + } #if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); + gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->finalize_cset(target_pause_time_ms); + g1_policy()->finalize_cset(target_pause_time_ms); - evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length()); + evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length()); - register_humongous_regions_with_cset(); + register_humongous_regions_with_cset(); - assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); + assert(check_cset_fast_test(), "Inconsistency in the InCSetState table."); - _cm->note_start_of_gc(); - // We call this after finalize_cset() to - // ensure that the CSet has been finalized. - _cm->verify_no_cset_oops(); - - if (_hr_printer.is_active()) { - HeapRegion* hr = g1_policy()->collection_set(); - while (hr != NULL) { - _hr_printer.cset(hr); - hr = hr->next_in_collection_set(); - } - } + _cm->note_start_of_gc(); + // We call this after finalize_cset() to + // ensure that the CSet has been finalized. + _cm->verify_no_cset_oops(); + + if (_hr_printer.is_active()) { + HeapRegion* hr = g1_policy()->collection_set(); + while (hr != NULL) { + _hr_printer.cset(hr); + hr = hr->next_in_collection_set(); + } + } #ifdef ASSERT - VerifyCSetClosure cl; - collection_set_iterate(&cl); + VerifyCSetClosure cl; + collection_set_iterate(&cl); #endif // ASSERT - setup_surviving_young_words(); + setup_surviving_young_words(); - // Initialize the GC alloc regions. - _allocator->init_gc_alloc_regions(evacuation_info); + // Initialize the GC alloc regions. 
+ _allocator->init_gc_alloc_regions(evacuation_info); - G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers()); - // Actually do the work... - evacuate_collection_set(evacuation_info, &per_thread_states); + G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers()); + // Actually do the work... + evacuate_collection_set(evacuation_info, &per_thread_states); - free_collection_set(g1_policy()->collection_set(), evacuation_info); + free_collection_set(g1_policy()->collection_set(), evacuation_info); - eagerly_reclaim_humongous_regions(); + eagerly_reclaim_humongous_regions(); - g1_policy()->clear_collection_set(); + g1_policy()->clear_collection_set(); - cleanup_surviving_young_words(); + cleanup_surviving_young_words(); - // Start a new incremental collection set for the next pause. - g1_policy()->start_incremental_cset_building(); + // Start a new incremental collection set for the next pause. + g1_policy()->start_incremental_cset_building(); - clear_cset_fast_test(); + clear_cset_fast_test(); - _young_list->reset_sampled_info(); + _young_list->reset_sampled_info(); - // Don't check the whole heap at this point as the - // GC alloc regions from this pause have been tagged - // as survivors and moved on to the survivor list. - // Survivor regions will fail the !is_young() check. - assert(check_young_list_empty(false /* check_heap */), - "young list should be empty"); + // Don't check the whole heap at this point as the + // GC alloc regions from this pause have been tagged + // as survivors and moved on to the survivor list. + // Survivor regions will fail the !is_young() check. + assert(check_young_list_empty(false /* check_heap */), + "young list should be empty"); #if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); - _young_list->print(); + gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); + _young_list->print(); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_survivor_regions(_young_list->survivor_length(), - _young_list->first_survivor_region(), - _young_list->last_survivor_region()); - - _young_list->reset_auxilary_lists(); - - if (evacuation_failed()) { - set_used(recalculate_used()); - if (_archive_allocator != NULL) { - _archive_allocator->clear_used(); - } - for (uint i = 0; i < ParallelGCThreads; i++) { - if (_evacuation_failed_info_array[i].has_failed()) { - _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); - } - } - } else { - // The "used" of the the collection set have already been subtracted - // when they were freed. Add in the bytes evacuated. - increase_used(g1_policy()->bytes_copied_during_gc()); - } - - if (collector_state()->during_initial_mark_pause()) { - // We have to do this before we notify the CM threads that - // they can start working to make sure that all the - // appropriate initialization is done on the CM object. - concurrent_mark()->checkpointRootsInitialPost(); - collector_state()->set_mark_in_progress(true); - // Note that we don't actually trigger the CM thread at - // this point. We do that later when we're sure that - // the current thread has completed its logging output. 
- } + g1_policy()->record_survivor_regions(_young_list->survivor_length(), + _young_list->first_survivor_region(), + _young_list->last_survivor_region()); + + _young_list->reset_auxilary_lists(); + + if (evacuation_failed()) { + set_used(recalculate_used()); + if (_archive_allocator != NULL) { + _archive_allocator->clear_used(); + } + for (uint i = 0; i < ParallelGCThreads; i++) { + if (_evacuation_failed_info_array[i].has_failed()) { + _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]); + } + } + } else { + // The "used" of the the collection set have already been subtracted + // when they were freed. Add in the bytes evacuated. + increase_used(g1_policy()->bytes_copied_during_gc()); + } + + if (collector_state()->during_initial_mark_pause()) { + // We have to do this before we notify the CM threads that + // they can start working to make sure that all the + // appropriate initialization is done on the CM object. + concurrent_mark()->checkpointRootsInitialPost(); + collector_state()->set_mark_in_progress(true); + // Note that we don't actually trigger the CM thread at + // this point. We do that later when we're sure that + // the current thread has completed its logging output. + } - allocate_dummy_regions(); + allocate_dummy_regions(); #if YOUNG_LIST_VERBOSE - gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); - _young_list->print(); - g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); + gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - _allocator->init_mutator_alloc_region(); - - { - size_t expand_bytes = g1_policy()->expansion_amount(); - if (expand_bytes > 0) { - size_t bytes_before = capacity(); - // No need for an ergo verbose message here, - // expansion_amount() does this when it returns a value > 0. - if (!expand(expand_bytes)) { - // We failed to expand the heap. Cannot do anything about it. - } - } - } - - // We redo the verification but now wrt to the new CSet which - // has just got initialized after the previous CSet was freed. - _cm->verify_no_cset_oops(); - _cm->note_end_of_gc(); - - // This timing is only used by the ergonomics to handle our pause target. - // It is unclear why this should not include the full pause. We will - // investigate this in CR 7178365. - double sample_end_time_sec = os::elapsedTime(); - double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; - g1_policy()->record_collection_pause_end(pause_time_ms); - - evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before()); - evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc()); - - MemoryService::track_memory_usage(); - - // In prepare_for_verify() below we'll need to scan the deferred - // update buffers to bring the RSets up-to-date if - // G1HRRSFlushLogBuffersOnVerify has been set. While scanning - // the update buffers we'll probably need to scan cards on the - // regions we just allocated to (i.e., the GC alloc - // regions). However, during the last GC we called - // set_saved_mark() on all the GC alloc regions, so card - // scanning might skip the [saved_mark_word()...top()] area of - // those regions (i.e., the area we allocated objects into - // during the last GC). But it shouldn't. 
Given that - // saved_mark_word() is conditional on whether the GC time stamp - // on the region is current or not, by incrementing the GC time - // stamp here we invalidate all the GC time stamps on all the - // regions and saved_mark_word() will simply return top() for - // all the regions. This is a nicer way of ensuring this rather - // than iterating over the regions and fixing them. In fact, the - // GC time stamp increment here also ensures that - // saved_mark_word() will return top() between pauses, i.e., - // during concurrent refinement. So we don't need the - // is_gc_active() check to decided which top to use when - // scanning cards (see CR 7039627). - increment_gc_time_stamp(); + _allocator->init_mutator_alloc_region(); - verify_after_gc(); - check_bitmaps("GC End"); - - assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); - ref_processor_stw()->verify_no_references_recorded(); - - // CM reference discovery will be re-enabled if necessary. - } - - // We should do this after we potentially expand the heap so - // that all the COMMIT events are generated before the end GC - // event, and after we retire the GC alloc regions so that all - // RETIRE events are generated before the end GC event. - _hr_printer.end_gc(false /* full */, (size_t) total_collections()); + { + size_t expand_bytes = g1_policy()->expansion_amount(); + if (expand_bytes > 0) { + size_t bytes_before = capacity(); + // No need for an ergo verbose message here, + // expansion_amount() does this when it returns a value > 0. + if (!expand(expand_bytes)) { + // We failed to expand the heap. Cannot do anything about it. + } + } + } + + // We redo the verification but now wrt to the new CSet which + // has just got initialized after the previous CSet was freed. + _cm->verify_no_cset_oops(); + _cm->note_end_of_gc(); + + // This timing is only used by the ergonomics to handle our pause target. + // It is unclear why this should not include the full pause. We will + // investigate this in CR 7178365. + double sample_end_time_sec = os::elapsedTime(); + double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS; + g1_policy()->record_collection_pause_end(pause_time_ms); + + evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before()); + evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc()); + + MemoryService::track_memory_usage(); + + // In prepare_for_verify() below we'll need to scan the deferred + // update buffers to bring the RSets up-to-date if + // G1HRRSFlushLogBuffersOnVerify has been set. While scanning + // the update buffers we'll probably need to scan cards on the + // regions we just allocated to (i.e., the GC alloc + // regions). However, during the last GC we called + // set_saved_mark() on all the GC alloc regions, so card + // scanning might skip the [saved_mark_word()...top()] area of + // those regions (i.e., the area we allocated objects into + // during the last GC). But it shouldn't. Given that + // saved_mark_word() is conditional on whether the GC time stamp + // on the region is current or not, by incrementing the GC time + // stamp here we invalidate all the GC time stamps on all the + // regions and saved_mark_word() will simply return top() for + // all the regions. This is a nicer way of ensuring this rather + // than iterating over the regions and fixing them. 
In fact, the + // GC time stamp increment here also ensures that + // saved_mark_word() will return top() between pauses, i.e., + // during concurrent refinement. So we don't need the + // is_gc_active() check to decided which top to use when + // scanning cards (see CR 7039627). + increment_gc_time_stamp(); + + verify_after_gc(); + check_bitmaps("GC End"); + + assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); + ref_processor_stw()->verify_no_references_recorded(); + + // CM reference discovery will be re-enabled if necessary. + } + + // We should do this after we potentially expand the heap so + // that all the COMMIT events are generated before the end GC + // event, and after we retire the GC alloc regions so that all + // RETIRE events are generated before the end GC event. + _hr_printer.end_gc(false /* full */, (size_t) total_collections()); #ifdef TRACESPINNING - ParallelTaskTerminator::print_termination_counts(); + ParallelTaskTerminator::print_termination_counts(); #endif - gc_epilogue(false); - } - - // Print the remainder of the GC log output. - log_gc_footer(os::elapsedTime() - pause_start_sec); - - // It is not yet to safe to tell the concurrent mark to - // start as we have some optional output below. We don't want the - // output from the concurrent mark thread interfering with this - // logging output either. - - _hrm.verify_optional(); - verify_region_sets_optional(); - - TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats()); - TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); + gc_epilogue(false); + } - print_heap_after_gc(); - trace_heap_after_gc(_gc_tracer_stw); + // Print the remainder of the GC log output. + log_gc_footer(os::elapsedTime() - pause_start_sec); - // We must call G1MonitoringSupport::update_sizes() in the same scoping level - // as an active TraceMemoryManagerStats object (i.e. before the destructor for the - // TraceMemoryManagerStats is called) so that the G1 memory pools are updated - // before any GC notifications are raised. - g1mm()->update_sizes(); - - _gc_tracer_stw->report_evacuation_info(&evacuation_info); - _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); - _gc_timer_stw->register_gc_end(); - _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); + // It is not yet to safe to tell the concurrent mark to + // start as we have some optional output below. We don't want the + // output from the concurrent mark thread interfering with this + // logging output either. + + _hrm.verify_optional(); + verify_region_sets_optional(); + + TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); + + print_heap_after_gc(); + trace_heap_after_gc(_gc_tracer_stw); + + // We must call G1MonitoringSupport::update_sizes() in the same scoping level + // as an active TraceMemoryManagerStats object (i.e. before the destructor for the + // TraceMemoryManagerStats is called) so that the G1 memory pools are updated + // before any GC notifications are raised. + g1mm()->update_sizes(); + + _gc_tracer_stw->report_evacuation_info(&evacuation_info); + _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold()); + _gc_timer_stw->register_gc_end(); + _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions()); + } + // It should now be safe to tell the concurrent mark thread to start + // without its logging output interfering with the logging output + // that came from the pause. 
} - // It should now be safe to tell the concurrent mark thread to start - // without its logging output interfering with the logging output - // that came from the pause. if (should_start_conc_mark) { // CAUTION: after the doConcurrentMark() call below, --- old/src/share/vm/gc/g1/vm_operations_g1.cpp 2015-09-09 16:12:51.955861823 +0200 +++ new/src/share/vm/gc/g1/vm_operations_g1.cpp 2015-09-09 16:12:51.843861827 +0200 @@ -26,7 +26,6 @@ #include "gc/g1/concurrentMarkThread.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" -#include "gc/shared/gcId.hpp" #include "gc/g1/g1Log.hpp" #include "gc/g1/vm_operations_g1.hpp" #include "gc/shared/gcTimer.hpp" @@ -228,7 +227,6 @@ void VM_CGC_Operation::doit() { TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCIdMark gc_id_mark(_gc_id); GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm()); IsGCActiveMark x; _cl->do_void(); --- old/src/share/vm/gc/g1/vm_operations_g1.hpp 2015-09-09 16:12:52.159861816 +0200 +++ new/src/share/vm/gc/g1/vm_operations_g1.hpp 2015-09-09 16:12:52.047861820 +0200 @@ -26,7 +26,6 @@ #define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP #include "gc/g1/g1AllocationContext.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/vmGCOperations.hpp" // VM_operations for the G1 collector. @@ -105,7 +104,6 @@ VoidClosure* _cl; const char* _printGCMessage; bool _needs_pll; - uint _gc_id; protected: // java.lang.ref.Reference support @@ -114,7 +112,7 @@ public: VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll) - : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { } + : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { } virtual VMOp_Type type() const { return VMOp_CGC_Operation; } virtual void doit(); virtual bool doit_prologue(); --- old/src/share/vm/gc/shared/gcId.cpp 2015-09-09 16:12:52.363861809 +0200 +++ new/src/share/vm/gc/shared/gcId.cpp 2015-09-09 16:12:52.247861813 +0200 @@ -24,38 +24,28 @@ #include "precompiled.hpp" #include "gc/shared/gcId.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/thread.inline.hpp" uint GCId::_next_id = 0; - -NamedThread* currentNamedthread() { - assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread"); - return (NamedThread*)Thread::current(); -} +uint GCId::_current_id = UNDEFINED; const uint GCId::create() { return _next_id++; } const uint GCId::current() { - assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id."); - return current_raw(); -} - -const uint GCId::current_raw() { - return currentNamedthread()->gc_id(); -} - -GCIdMark::GCIdMark() : _gc_id(GCId::create()) { - currentNamedthread()->set_gc_id(_gc_id); + assert(_current_id != UNDEFINED, "Using undefined GC ID"); + return _current_id; } -GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) { - currentNamedthread()->set_gc_id(_gc_id); +GCIdMark::GCIdMark() { + _previous_gc_id = GCId::_current_id; + uint gc_id = GCId::create(); + GCId::set_current(gc_id); + DEBUG_ONLY(_gc_id = gc_id;) } GCIdMark::~GCIdMark() { - currentNamedthread()->set_gc_id(GCId::undefined()); + assert(_gc_id == GCId::_current_id, err_msg("GCIdMarks for %u and %u overlap.", _gc_id, GCId::_current_id)); + GCId::set_current(_previous_gc_id); } --- old/src/share/vm/gc/shared/gcId.hpp 2015-09-09 16:12:52.563861802 +0200 +++ new/src/share/vm/gc/shared/gcId.hpp 2015-09-09 16:12:52.447861806 +0200 @@ -30,22 +30,20 @@ class GCId : public AllStatic { 
friend class GCIdMark; static uint _next_id; + static uint _current_id; static const uint UNDEFINED = (uint)-1; static const uint create(); + static void set_current(uint gc_id) { _current_id = gc_id; } public: - // Returns the currently active GC id. Asserts that there is an active GC id. static const uint current(); - // Same as current() but can return undefined() if no GC id is currently active - static const uint current_raw(); - static const uint undefined() { return UNDEFINED; } }; class GCIdMark { - uint _gc_id; + uint _previous_gc_id; + DEBUG_ONLY(uint _gc_id;) public: GCIdMark(); - GCIdMark(uint gc_id); ~GCIdMark(); }; --- old/src/share/vm/gc/shared/gcTraceTime.cpp 2015-09-09 16:12:52.771861795 +0200 +++ new/src/share/vm/gc/shared/gcTraceTime.cpp 2015-09-09 16:12:52.659861799 +0200 @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.hpp" --- old/src/share/vm/gc/shared/workgroup.cpp 2015-09-09 16:12:52.979861788 +0200 +++ new/src/share/vm/gc/shared/workgroup.cpp 2015-09-09 16:12:52.867861791 +0200 @@ -23,7 +23,6 @@ */ #include "precompiled.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" @@ -329,7 +328,6 @@ void GangWorker::run_task(WorkData data) { print_task_started(data); - GCIdMark gc_id_mark(data._task->gc_id()); data._task->work(data._worker_id); print_task_done(data); --- old/src/share/vm/gc/shared/workgroup.hpp 2015-09-09 16:12:53.191861780 +0200 +++ new/src/share/vm/gc/shared/workgroup.hpp 2015-09-09 16:12:53.079861784 +0200 @@ -28,7 +28,6 @@ #include "memory/allocation.hpp" #include "runtime/globals.hpp" #include "runtime/thread.hpp" -#include "gc/shared/gcId.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -55,13 +54,9 @@ // You subclass this to supply your own work() method class AbstractGangTask VALUE_OBJ_CLASS_SPEC { const char* _name; - const uint _gc_id; public: - AbstractGangTask(const char* name) : - _name(name), - _gc_id(GCId::current_raw()) // Use current_raw() here since the G1ParVerifyTask can be called outside of a GC (at VM exit) - {} + AbstractGangTask(const char* name) : _name(name) {} // The abstract work method. // The argument tells you which member of the gang you are. @@ -69,7 +64,6 @@ // Debugging accessor for the name. 
const char* name() const { return _name; } - const uint gc_id() const { return _gc_id; } }; struct WorkData { --- old/src/share/vm/runtime/thread.cpp 2015-09-09 16:12:53.411861773 +0200 +++ new/src/share/vm/runtime/thread.cpp 2015-09-09 16:12:53.291861777 +0200 @@ -31,7 +31,6 @@ #include "code/codeCacheExtensions.hpp" #include "code/scopeDesc.hpp" #include "compiler/compileBroker.hpp" -#include "gc/shared/gcId.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/workgroup.hpp" #include "interpreter/interpreter.hpp" @@ -220,7 +219,6 @@ NOT_PRODUCT(_skip_gcalot = false;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); - _gc_id = GCId::undefined(); _vm_operation_started_count = 0; _vm_operation_completed_count = 0; _current_pending_monitor = NULL; --- old/src/share/vm/runtime/thread.hpp 2015-09-09 16:12:53.679861763 +0200 +++ new/src/share/vm/runtime/thread.hpp 2015-09-09 16:12:53.567861767 +0200 @@ -266,7 +266,6 @@ ThreadLocalAllocBuffer _tlab; // Thread-local eden jlong _allocated_bytes; // Cumulative number of bytes allocated on // the Java heap - uint _gc_id; // The current GC id when a thread takes part in GC TRACE_DATA _trace_data; // Thread-local data for tracing @@ -426,9 +425,6 @@ void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } inline jlong cooked_allocated_bytes(); - void set_gc_id(uint gc_id) { _gc_id = gc_id; } - uint gc_id() { return _gc_id; } - TRACE_DATA* trace_data() { return &_trace_data; } const ThreadExt& ext() const { return _ext; } --- old/src/share/vm/utilities/ostream.hpp 2015-09-09 16:12:53.903861755 +0200 +++ new/src/share/vm/utilities/ostream.hpp 2015-09-09 16:12:53.791861759 +0200 @@ -28,7 +28,6 @@ #include "memory/allocation.hpp" #include "runtime/timer.hpp" -class GCId; DEBUG_ONLY(class ResourceMark;) // Output streams for printing
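
The heart of this patch is the new gcId.{hpp,cpp} above: the active GC id moves out of NamedThread (and out of the per-operation copies in VM_CMS_Operation, VM_CGC_Operation and AbstractGangTask) into a single static GCId::_current_id, which GCIdMark saves and restores in RAII fashion. That is what makes the plumbing removals in vmCMSOperations, vm_operations_g1, workgroup.{cpp,hpp} and thread.{cpp,hpp} safe: because the id now lives in one process-wide field rather than a per-thread one, any code that runs while a mark is in scope, apparently including gang workers running a task on behalf of the marked thread, can obtain it with a plain GCId::current() call, so GangWorker::run_task() no longer needs to re-establish it. The following is a minimal standalone sketch of those save/restore semantics, not the HotSpot code itself: it mirrors the class and member names from the patch, but replaces the DEBUG_ONLY/assert/err_msg machinery with <cassert> and adds a hypothetical main() driver so it compiles and runs on its own.

#include <cassert>
#include <cstdio>

typedef unsigned int uint;

class GCIdMark;

class GCId {
  friend class GCIdMark;
  static uint _next_id;
  static uint _current_id;
  static const uint UNDEFINED = (uint)-1;
  static uint create() { return _next_id++; }
  static void set_current(uint gc_id) { _current_id = gc_id; }
 public:
  // Returns the currently active GC id; asking for it outside the
  // scope of any GCIdMark is an error.
  static uint current() {
    assert(_current_id != UNDEFINED && "Using undefined GC ID");
    return _current_id;
  }
};

uint GCId::_next_id = 0;
uint GCId::_current_id = GCId::UNDEFINED;

// RAII mark: creates a fresh id on construction and restores the
// previous one on destruction, so marks may nest but never overlap.
class GCIdMark {
  uint _previous_gc_id;
  uint _gc_id;  // DEBUG_ONLY in the real patch; kept unconditionally here
 public:
  GCIdMark() : _previous_gc_id(GCId::_current_id), _gc_id(GCId::create()) {
    GCId::set_current(_gc_id);
  }
  ~GCIdMark() {
    assert(_gc_id == GCId::_current_id && "GCIdMarks overlap");
    GCId::set_current(_previous_gc_id);
  }
};

int main() {
  GCIdMark outer;                    // id 0 becomes current
  std::printf("outer: %u\n", GCId::current());
  {
    GCIdMark inner;                  // id 1 becomes current
    std::printf("inner: %u\n", GCId::current());
  }                                  // inner destroyed, id 0 current again
  std::printf("outer: %u\n", GCId::current());
  return 0;
}

This also explains the large, mostly-whitespace reshuffle in g1CollectedHeap.cpp: the GCIdMark that used to sit at the top of the pause now opens a scope of its own, so everything that logs under the pause's GC id moves inside that scope and gains one level of indentation, while should_start_conc_mark is declared (initialized to false) before the scope because it is consulted after the scope closes, once it is safe to signal the concurrent mark thread without its output interleaving with the pause's logging.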