src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

*** 37,46 ****
--- 37,50 ----
  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
  #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
  #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
  #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+ #include "gc_implementation/shared/gcHeapSummary.hpp"
+ #include "gc_implementation/shared/gcTimer.hpp"
+ #include "gc_implementation/shared/gcTrace.hpp"
+ #include "gc_implementation/shared/gcTraceTime.hpp"
  #include "gc_implementation/shared/isGCActiveMark.hpp"
  #include "gc_interface/gcCause.hpp"
  #include "memory/gcLocker.inline.hpp"
  #include "memory/referencePolicy.hpp"
  #include "memory/referenceProcessor.hpp"
*** 770,779 ****
--- 774,785 ----
    ParallelCompactData& sd = PSParallelCompact::summary_data();
    return sd.region(region_index);
  }
  #endif
  
+ STWGCTimer          PSParallelCompact::_gc_timer;
+ ParallelOldTracer   PSParallelCompact::_gc_tracer;
  elapsedTimer        PSParallelCompact::_accumulated_time;
  unsigned int        PSParallelCompact::_total_invocations = 0;
  unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
  jlong               PSParallelCompact::_time_of_last_gc = 0;
  CollectorCounters*  PSParallelCompact::_counters = NULL;
*** 943,953 ****
  {
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of minor
    // collections will have swapped the spaces an unknown number of times.
!   TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
    ParallelScavengeHeap* heap = gc_heap();
  
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());
    pre_gc_values->fill(heap);
--- 949,959 ----
  {
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of minor
    // collections will have swapped the spaces an unknown number of times.
!   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
    ParallelScavengeHeap* heap = gc_heap();
  
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());
    pre_gc_values->fill(heap);
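This hunk shows the change pattern repeated throughout the file: the log-stream-based TraceTime is replaced by GCTraceTime, an RAII scope that reports the phase into the collector's STWGCTimer rather than printing to gclog_or_tty. The phase opens when the local object is constructed and closes when it goes out of scope at the end of pre_compact(). Below is a minimal standalone sketch of that RAII pattern; PhaseTimer, ScopedPhase and their members are simplified stand-ins for illustration, not the HotSpot classes.

#include <chrono>
#include <cstdio>
#include <string>
#include <vector>

// Accumulates named phase intervals, loosely analogous to a GC timer.
class PhaseTimer {
 public:
  struct Phase { std::string name; double ms; };

  void register_phase_start(const std::string& name) {
    _name = name;
    _start = std::chrono::steady_clock::now();
  }
  void register_phase_end() {
    auto end = std::chrono::steady_clock::now();
    double ms = std::chrono::duration<double, std::milli>(end - _start).count();
    _phases.push_back({_name, ms});
  }
  const std::vector<Phase>& phases() const { return _phases; }

 private:
  std::string _name;
  std::chrono::steady_clock::time_point _start;
  std::vector<Phase> _phases;
};

// Analogous in shape to: GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
class ScopedPhase {
 public:
  ScopedPhase(const char* title, bool print, PhaseTimer* timer)
      : _title(title), _print(print), _timer(timer) {
    if (_timer != nullptr) _timer->register_phase_start(_title);
  }
  ~ScopedPhase() {
    if (_timer != nullptr) _timer->register_phase_end();
    if (_print) std::printf("[%s done]\n", _title);
  }

 private:
  const char* _title;
  bool        _print;
  PhaseTimer* _timer;
};

int main() {
  PhaseTimer timer;
  {
    ScopedPhase tm("pre compact", /*print=*/true, &timer);
    // ... phase work would run here ...
  }  // phase end is recorded when tm goes out of scope
  for (const auto& p : timer.phases()) {
    std::printf("%s: %.3f ms\n", p.name.c_str(), p.ms);
  }
  return 0;
}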
*** 960,969 ****
--- 966,976 ----
    // We need to track unique mark sweep invocations as well.
    _total_invocations++;
  
    heap->print_heap_before_gc();
+   heap->trace_heap_before_gc(&_gc_tracer);
  
    // Fill in TLABs
    heap->accumulate_statistics_all_tlabs();
    heap->ensure_parsability(true);  // retire TLABs
*** 985,995 ****
    gc_task_manager()->release_all_resources();
  }
  
  void PSParallelCompact::post_compact()
  {
!   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top().  Must be done after clearing the bitmap and summary data.
--- 992,1002 ----
    gc_task_manager()->release_all_resources();
  }
  
  void PSParallelCompact::post_compact()
  {
!   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
  
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top().  Must be done after clearing the bitmap and summary data.
*** 1811,1821 ****
  #endif  // #ifndef PRODUCT
  
  void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                        bool maximum_compaction)
  {
!   TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
    // trace("2");
  
  #ifdef ASSERT
    if (TraceParallelOldGCMarkingPhase) {
      tty->print_cr("add_obj_count=" SIZE_FORMAT " "
--- 1818,1828 ----
  #endif  // #ifndef PRODUCT
  
  void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                        bool maximum_compaction)
  {
!   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
    // trace("2");
  
  #ifdef ASSERT
    if (TraceParallelOldGCMarkingPhase) {
      tty->print_cr("add_obj_count=" SIZE_FORMAT " "
*** 1974,1988 ****
    if (GC_locker::check_active_before_gc()) {
      return false;
    }
  
    TimeStamp marking_start;
    TimeStamp compaction_start;
    TimeStamp collection_exit;
  
-   ParallelScavengeHeap* heap = gc_heap();
    GCCause::Cause gc_cause = heap->gc_cause();
    PSYoungGen* young_gen = heap->young_gen();
    PSOldGen* old_gen = heap->old_gen();
    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
--- 1981,1999 ----
    if (GC_locker::check_active_before_gc()) {
      return false;
    }
  
+   ParallelScavengeHeap* heap = gc_heap();
+ 
+   _gc_timer.register_gc_start(os::elapsed_counter());
+   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
+ 
    TimeStamp marking_start;
    TimeStamp compaction_start;
    TimeStamp collection_exit;
  
    GCCause::Cause gc_cause = heap->gc_cause();
    PSYoungGen* young_gen = heap->young_gen();
    PSOldGen* old_gen = heap->old_gen();
    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
*** 1994,2004 ****
    if (ZapUnusedHeapArea) {
      // Save information needed to minimize mangling
      heap->record_gen_tops_before_GC();
    }
  
!   heap->pre_full_gc_dump();
  
    _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
  
    // Make sure data structures are sane, make the heap parsable, and do other
    // miscellaneous bookkeeping.
--- 2005,2015 ----
    if (ZapUnusedHeapArea) {
      // Save information needed to minimize mangling
      heap->record_gen_tops_before_GC();
    }
  
!   heap->pre_full_gc_dump(&_gc_timer);
  
    _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
  
    // Make sure data structures are sane, make the heap parsable, and do other
    // miscellaneous bookkeeping.
*** 2021,2031 ****
    gc_task_manager()->task_idle_workers();
    heap->set_par_threads(gc_task_manager()->active_workers());
  
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
  
    if (TraceGen1Time) accumulated_time()->start();
--- 2032,2042 ----
    gc_task_manager()->task_idle_workers();
    heap->set_par_threads(gc_task_manager()->active_workers());
  
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
  
    if (TraceGen1Time) accumulated_time()->start();
*** 2041,2051 ****
    ref_processor()->setup_policy(maximum_heap_compaction);
  
    bool marked_for_unloading = false;
  
    marking_start.update();
!   marking_phase(vmthread_cm, maximum_heap_compaction);
  
    bool max_on_system_gc = UseMaximumCompactionOnSystemGC
      && gc_cause == GCCause::_java_lang_system_gc;
    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
--- 2052,2062 ----
    ref_processor()->setup_policy(maximum_heap_compaction);
  
    bool marked_for_unloading = false;
  
    marking_start.update();
!   marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
  
    bool max_on_system_gc = UseMaximumCompactionOnSystemGC
      && gc_cause == GCCause::_java_lang_system_gc;
    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
*** 2194,2217 ****
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  
    collection_exit.update();
  
    heap->print_heap_after_gc();
  
    if (PrintGCTaskTimeStamps) {
      gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                             marking_start.ticks(), compaction_start.ticks(),
                             collection_exit.ticks());
      gc_task_manager()->print_task_time_stamps();
    }
  
!   heap->post_full_gc_dump();
  
  #ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
  #endif
  
    return true;
  }
  
  bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                     PSYoungGen* young_gen,
--- 2205,2235 ----
    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  
    collection_exit.update();
  
    heap->print_heap_after_gc();
+   heap->trace_heap_after_gc(&_gc_tracer);
  
    if (PrintGCTaskTimeStamps) {
      gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                             marking_start.ticks(), compaction_start.ticks(),
                             collection_exit.ticks());
      gc_task_manager()->print_task_time_stamps();
    }
  
!   heap->post_full_gc_dump(&_gc_timer);
  
  #ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
  #endif
  
+   _gc_timer.register_gc_end(os::elapsed_counter());
+ 
+   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
+   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
+ 
    return true;
  }
  
  bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                     PSYoungGen* young_gen,
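Taken together with the additions near the top of invoke_no_policy(), this hunk brackets the whole collection: register_gc_start()/report_gc_start() run before the first phase and register_gc_end()/report_gc_end() after the last one, with the tracer turning the timed interval into events (presumably also why the per-collection "Full GC" GCTraceTime earlier passes NULL rather than &_gc_timer: the top-level interval is already registered by this explicit pairing). A standalone sketch of that ordering follows; GcTimer, GcTracer and Ticks are simplified stand-ins for STWGCTimer/ParallelOldTracer, not their real interfaces.

#include <cstdint>
#include <cstdio>

using Ticks = int64_t;

// Monotonic stand-in for os::elapsed_counter().
static Ticks elapsed_counter() {
  static Ticks fake_clock = 0;
  return ++fake_clock;
}

class GcTimer {
 public:
  void register_gc_start(Ticks now) { _start = now; }
  void register_gc_end(Ticks now)   { _end = now; }
  Ticks gc_start() const { return _start; }
  Ticks gc_end() const   { return _end; }

 private:
  Ticks _start = 0;
  Ticks _end = 0;
};

class GcTracer {
 public:
  void report_gc_start(const char* cause, Ticks start) {
    std::printf("GC start: cause=%s tick=%lld\n", cause, (long long)start);
  }
  void report_gc_end(Ticks end) {
    std::printf("GC end: tick=%lld\n", (long long)end);
  }
};

int main() {
  GcTimer timer;
  GcTracer tracer;

  // Mirrors the ordering in invoke_no_policy(): timer first, then tracer...
  timer.register_gc_start(elapsed_counter());
  tracer.report_gc_start("System.gc()", timer.gc_start());

  // ... marking, summary and compaction phases would run here ...

  // ...and the mirror image on the way out.
  timer.register_gc_end(elapsed_counter());
  tracer.report_gc_end(timer.gc_end());
  return 0;
}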
*** 2306,2318 ****
           "shouldn't return NULL");
    return ParallelScavengeHeap::gc_task_manager();
  }
  
  void PSParallelCompact::marking_phase(ParCompactionManager* cm,
!                                       bool maximum_heap_compaction) {
    // Recursively traverse all live objects and mark them
!   TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
  
    ParallelScavengeHeap* heap = gc_heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
--- 2324,2337 ----
           "shouldn't return NULL");
    return ParallelScavengeHeap::gc_task_manager();
  }
  
  void PSParallelCompact::marking_phase(ParCompactionManager* cm,
!                                       bool maximum_heap_compaction,
!                                       ParallelOldTracer *gc_tracer) {
    // Recursively traverse all live objects and mark them
!   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
  
    ParallelScavengeHeap* heap = gc_heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
*** 2323,2333 ****
    // Need new claim bits before marking starts.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    {
!     TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
      ParallelScavengeHeap::ParStrongRootsScope psrs;
  
      GCTaskQueue* q = GCTaskQueue::create();
  
      q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
--- 2342,2353 ----
    // Need new claim bits before marking starts.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    {
!     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
! 
      ParallelScavengeHeap::ParStrongRootsScope psrs;
  
      GCTaskQueue* q = GCTaskQueue::create();
  
      q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
*** 2351,2373 ****
      gc_task_manager()->execute_and_wait(q);
    }
  
    // Process reference objects found during marking
    {
!     TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
      if (ref_processor()->processing_is_mt()) {
        RefProcTaskExecutor task_executor;
!       ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
!         &task_executor);
      } else {
!       ref_processor()->process_discovered_references(
!         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
      }
    }
  
!   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
  
    // This is the point where the entire marking should have completed.
    assert(cm->marking_stacks_empty(), "Marking should have completed");
  
    // Follow system dictionary roots and unload classes.
--- 2371,2398 ----
      gc_task_manager()->execute_and_wait(q);
    }
  
    // Process reference objects found during marking
    {
!     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
! 
!     ReferenceProcessorStats stats;
      if (ref_processor()->processing_is_mt()) {
        RefProcTaskExecutor task_executor;
!       stats = ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
!         &task_executor, &_gc_timer);
      } else {
!       stats = ref_processor()->process_discovered_references(
!         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
!         &_gc_timer);
      }
+ 
+     gc_tracer->report_gc_reference_stats(stats);
    }
  
!   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
  
    // This is the point where the entire marking should have completed.
    assert(cm->marking_stacks_empty(), "Marking should have completed");
  
    // Follow system dictionary roots and unload classes.
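This hunk changes the data flow of reference processing: process_discovered_references() now also takes the timer and returns a ReferenceProcessorStats value, which the caller forwards to the tracer via report_gc_reference_stats() instead of discarding. A standalone sketch of that return-and-report shape follows; the struct fields and counts are illustrative stand-ins, not the real ReferenceProcessorStats layout.

#include <cstddef>
#include <cstdio>

// Simplified stand-in for ReferenceProcessorStats.
struct ReferenceStats {
  size_t soft_count = 0;
  size_t weak_count = 0;
  size_t final_count = 0;
  size_t phantom_count = 0;
};

class RefProcessor {
 public:
  // Stand-in for ref_processor()->process_discovered_references(...):
  // instead of returning void, it hands back the per-kind counts.
  ReferenceStats process_discovered_references() {
    ReferenceStats stats;
    // ... discover and enqueue references, filling in the counts ...
    stats.weak_count = 42;  // illustrative value only
    return stats;
  }
};

class OldGcTracer {
 public:
  void report_gc_reference_stats(const ReferenceStats& stats) {
    std::printf("refs: soft=%zu weak=%zu final=%zu phantom=%zu\n",
                stats.soft_count, stats.weak_count,
                stats.final_count, stats.phantom_count);
  }
};

int main() {
  RefProcessor rp;
  OldGcTracer tracer;
  ReferenceStats stats = rp.process_discovered_references();
  tracer.report_gc_reference_stats(stats);  // mirrors gc_tracer->report_gc_reference_stats(stats)
  return 0;
}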
*** 2382,2391 ****
--- 2407,2417 ----
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
+   _gc_tracer.report_object_count_after_gc(is_alive_closure());
  }
  
  void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
    ClassLoaderData* cld = klass->class_loader_data();
    // The actual processing of the klass is done when we
*** 2422,2432 ****
  };
  static PSAlwaysTrueClosure always_true;
  
  void PSParallelCompact::adjust_roots() {
    // Adjust the pointers to reflect the new locations
!   TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
  
    // Need new claim bits when tracing through and adjusting pointers.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // General strong roots.
--- 2448,2458 ----
  };
  static PSAlwaysTrueClosure always_true;
  
  void PSParallelCompact::adjust_roots() {
    // Adjust the pointers to reflect the new locations
!   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
  
    // Need new claim bits when tracing through and adjusting pointers.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // General strong roots.
*** 2458,2468 ****
  }
  
  void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                        uint parallel_gc_threads)
  {
!   TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
  
    // Find the threads that are active
    unsigned int which = 0;
  
    const uint task_count = MAX2(parallel_gc_threads, 1U);
--- 2484,2494 ----
  }
  
  void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                        uint parallel_gc_threads)
  {
!   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
  
    // Find the threads that are active
    unsigned int which = 0;
  
    const uint task_count = MAX2(parallel_gc_threads, 1U);
*** 2532,2542 ****
  #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
  
  void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
!   TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
  
    ParallelCompactData& sd = PSParallelCompact::summary_data();
  
    // Iterate over all the spaces adding tasks for updating
    // regions in the dense prefix.  Assume that 1 gc thread
--- 2558,2568 ----
  #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
  
  void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
!   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
  
    ParallelCompactData& sd = PSParallelCompact::summary_data();
  
    // Iterate over all the spaces adding tasks for updating
    // regions in the dense prefix.  Assume that 1 gc thread
*** 2614,2624 ****
  void PSParallelCompact::enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads) {
!   TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
  
    // Once a thread has drained it's stack, it should try to steal regions from
    // other threads.
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < parallel_gc_threads; j++) {
--- 2640,2650 ----
  void PSParallelCompact::enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads) {
!   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
  
    // Once a thread has drained it's stack, it should try to steal regions from
    // other threads.
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < parallel_gc_threads; j++) {
*** 2627,2637 ****
    }
  }
  
  void PSParallelCompact::compact() {
    // trace("5");
!   TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
  
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
--- 2653,2663 ----
    }
  }
  
  void PSParallelCompact::compact() {
    // trace("5");
!   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
  
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
*** 2644,2654 ****
    enqueue_region_draining_tasks(q, active_gc_threads);
    enqueue_dense_prefix_tasks(q, active_gc_threads);
    enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
  
    {
!     TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
  
      gc_task_manager()->execute_and_wait(q);
  
  #ifdef ASSERT
      // Verify that all regions have been processed before the deferred updates.
--- 2670,2680 ----
    enqueue_region_draining_tasks(q, active_gc_threads);
    enqueue_dense_prefix_tasks(q, active_gc_threads);
    enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
  
    {
!     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
  
      gc_task_manager()->execute_and_wait(q);
  
  #ifdef ASSERT
      // Verify that all regions have been processed before the deferred updates.
*** 2658,2668 ****
  #endif
    }
  
    {
      // Update the deferred objects, if any.  Any compaction manager can be used.
!     TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
  
      ParCompactionManager* cm = ParCompactionManager::manager_array(0);
      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
        update_deferred_objects(cm, SpaceId(id));
      }
    }
--- 2684,2694 ----
  #endif
    }
  
    {
      // Update the deferred objects, if any.  Any compaction manager can be used.
!     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
  
      ParCompactionManager* cm = ParCompactionManager::manager_array(0);
      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
        update_deferred_objects(cm, SpaceId(id));
      }
    }