
src/share/vm/gc/parallel/psParallelCompact.cpp

*** 38,47 ****
--- 38,48 ----
  #include "gc/parallel/psPromotionManager.inline.hpp"
  #include "gc/parallel/psScavenge.hpp"
  #include "gc/parallel/psYoungGen.hpp"
  #include "gc/shared/gcCause.hpp"
  #include "gc/shared/gcHeapSummary.hpp"
+ #include "gc/shared/gcId.hpp"
  #include "gc/shared/gcLocker.inline.hpp"
  #include "gc/shared/gcTimer.hpp"
  #include "gc/shared/gcTrace.hpp"
  #include "gc/shared/gcTraceTime.hpp"
  #include "gc/shared/isGCActiveMark.hpp"
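
Note: the added gc/shared/gcId.hpp include presumably supplies the GCIdMark
used in PSParallelCompact::invoke_no_policy() below. Every other hunk in this
file is the same mechanical change: the trailing _gc_tracer.gc_id() argument
is dropped from the GCTraceTime constructors (and from
process_discovered_references), because the current GC id no longer has to be
passed around explicitly.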

*** 958,968 ****
  {
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of young
    // collections will have swapped the spaces an unknown number of times.
!   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());

    pre_gc_values->fill(heap);
--- 959,969 ----
  {
    // Update the from & to space pointers in space_info, since they are swapped
    // at each young gen gc.  Do the update unconditionally (even though a
    // promotion failure does not swap spaces) because an unknown number of young
    // collections will have swapped the spaces an unknown number of times.
!   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
    _space_info[to_space_id].set_space(heap->young_gen()->to_space());

    pre_gc_values->fill(heap);
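
For orientation, here is a minimal standalone model of why the fifth argument
can disappear (PhaseTimerModel and g_current_gc_id are illustrative names, not
HotSpot's): once the collection id lives in a thread-local, a scoped phase
timer can read it itself instead of taking it as a constructor parameter.

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins; the real HotSpot types are GCTraceTime and GCId.
    thread_local uint32_t g_current_gc_id = 0;  // published by an RAII id mark

    class PhaseTimerModel {
      const char* _title;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit PhaseTimerModel(const char* title)  // note: no gc-id parameter
          : _title(title), _start(std::chrono::steady_clock::now()) {}
      ~PhaseTimerModel() {
        long long us = std::chrono::duration_cast<std::chrono::microseconds>(
                           std::chrono::steady_clock::now() - _start).count();
        // The timer stamps its output from the ambient thread-local id itself.
        std::printf("[GC#%u] %s, %lld us\n", (unsigned)g_current_gc_id, _title, us);
      }
    };

    int main() {
      g_current_gc_id = 7;                 // normally set by the id mark
      PhaseTimerModel tm("pre compact");   // mirrors the call sites in this file
      return 0;
    }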

*** 1001,1011 ****
    gc_task_manager()->release_all_resources();
  }

  void PSParallelCompact::post_compact()
  {
!   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top().  Must be done after clearing the bitmap and summary data.
--- 1002,1012 ----
    gc_task_manager()->release_all_resources();
  }

  void PSParallelCompact::post_compact()
  {
!   GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);

    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      // Clear the marking bitmap, summary data and split info.
      clear_data_covering_space(SpaceId(id));
      // Update top().  Must be done after clearing the bitmap and summary data.

*** 1822,1832 ****
  #endif  // #ifndef PRODUCT

  void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                        bool maximum_compaction)
  {
!   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
    // trace("2");

  #ifdef ASSERT
    if (TraceParallelOldGCMarkingPhase) {
      tty->print_cr("add_obj_count=" SIZE_FORMAT " "
--- 1823,1833 ----
  #endif  // #ifndef PRODUCT

  void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                        bool maximum_compaction)
  {
!   GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
    // trace("2");

  #ifdef ASSERT
    if (TraceParallelOldGCMarkingPhase) {
      tty->print_cr("add_obj_count=" SIZE_FORMAT " "

*** 1982,1991 ****
--- 1983,1993 ----
      return false;
    }

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

+   GCIdMark gc_id_mark;
    _gc_timer.register_gc_start();
    _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

    TimeStamp marking_start;
    TimeStamp compaction_start;
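
A minimal sketch of the idea behind the new GCIdMark line, assuming it is a
stack-allocated mark that assigns the next collection id to a thread-local and
restores the previous value when the scope exits (all names below are
illustrative, not the real gcId.hpp API):

    #include <cstdint>
    #include <cstdio>

    // Illustrative model of an RAII GC-id mark; not the real gcId.hpp contents.
    static uint32_t next_gc_id = 1;                  // global collection counter
    static thread_local uint32_t current_gc_id = 0;  // 0 == no GC in progress

    class GCIdMarkModel {
      uint32_t _previous;                            // lets marks nest safely
    public:
      GCIdMarkModel() : _previous(current_gc_id) {
        current_gc_id = next_gc_id++;                // claim a fresh id
      }
      ~GCIdMarkModel() {
        current_gc_id = _previous;                   // restore on scope exit
      }
    };

    static void report(const char* what) {
      // Callees read the ambient id; no gc_id argument is threaded through.
      std::printf("GC#%u: %s\n", (unsigned)current_gc_id, what);
    }

    int main() {
      GCIdMarkModel mark;            // like "GCIdMark gc_id_mark;" above
      report("register_gc_start");
      report("marking phase");
      return 0;
    }

With the mark in place before register_gc_start(), the tracer registration and
every GCTraceTime later in the collection can rely on the ambient id, which is
exactly what the surrounding hunks remove.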

*** 2029,2039 ****
    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();
--- 2031,2041 ----
    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

*** 2329,2339 ****
  void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                        bool maximum_heap_compaction,
                                        ParallelOldTracer *gc_tracer) {
    // Recursively traverse all live objects and mark them
!   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();
--- 2331,2341 ----
  void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                        bool maximum_heap_compaction,
                                        ParallelOldTracer *gc_tracer) {
    // Recursively traverse all live objects and mark them
!   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
    uint active_gc_threads = heap->gc_task_manager()->active_workers();
    TaskQueueSetSuper* qset = ParCompactionManager::region_array();

*** 2344,2354 ****
    // Need new claim bits before marking starts.
    ClassLoaderDataGraph::clear_claimed_marks();

    {
!     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();
--- 2346,2356 ----
    // Need new claim bits before marking starts.
    ClassLoaderDataGraph::clear_claimed_marks();

    {
!     GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);

      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

*** 2373,2400 ****
      gc_task_manager()->execute_and_wait(q);
    }

    // Process reference objects found during marking
    {
!     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

      ReferenceProcessorStats stats;
      if (ref_processor()->processing_is_mt()) {
        RefProcTaskExecutor task_executor;
        stats = ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
!         &task_executor, &_gc_timer, _gc_tracer.gc_id());
      } else {
        stats = ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
!         &_gc_timer, _gc_tracer.gc_id());
      }

      gc_tracer->report_gc_reference_stats(stats);
    }

!   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    // This is the point where the entire marking should have completed.
    assert(cm->marking_stacks_empty(), "Marking should have completed");

    // Follow system dictionary roots and unload classes.
--- 2375,2402 ----
      gc_task_manager()->execute_and_wait(q);
    }

    // Process reference objects found during marking
    {
!     GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);

      ReferenceProcessorStats stats;
      if (ref_processor()->processing_is_mt()) {
        RefProcTaskExecutor task_executor;
        stats = ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
!         &task_executor, &_gc_timer);
      } else {
        stats = ref_processor()->process_discovered_references(
          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
!         &_gc_timer);
      }

      gc_tracer->report_gc_reference_stats(stats);
    }

!   GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);

    // This is the point where the entire marking should have completed.
    assert(cm->marking_stacks_empty(), "Marking should have completed");

    // Follow system dictionary roots and unload classes.

*** 2421,2431 ****
  };
  static PSAlwaysTrueClosure always_true;

  void PSParallelCompact::adjust_roots() {
    // Adjust the pointers to reflect the new locations
!   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    // Need new claim bits when tracing through and adjusting pointers.
    ClassLoaderDataGraph::clear_claimed_marks();

    // General strong roots.
--- 2423,2433 ----
  };
  static PSAlwaysTrueClosure always_true;

  void PSParallelCompact::adjust_roots() {
    // Adjust the pointers to reflect the new locations
!   GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);

    // Need new claim bits when tracing through and adjusting pointers.
    ClassLoaderDataGraph::clear_claimed_marks();

    // General strong roots.

*** 2457,2467 ****
  }

  void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                        uint parallel_gc_threads)
  {
!   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    // Find the threads that are active
    unsigned int which = 0;

    const uint task_count = MAX2(parallel_gc_threads, 1U);
--- 2459,2469 ----
  }

  void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                        uint parallel_gc_threads)
  {
!   GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);

    // Find the threads that are active
    unsigned int which = 0;

    const uint task_count = MAX2(parallel_gc_threads, 1U);

*** 2531,2541 ****
  #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4

  void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
!   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    ParallelCompactData& sd = PSParallelCompact::summary_data();

    // Iterate over all the spaces adding tasks for updating
    // regions in the dense prefix.  Assume that 1 gc thread
--- 2533,2543 ----
  #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4

  void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
!   GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);

    ParallelCompactData& sd = PSParallelCompact::summary_data();

    // Iterate over all the spaces adding tasks for updating
    // regions in the dense prefix.  Assume that 1 gc thread

*** 2613,2623 ****
  void PSParallelCompact::enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads) {
!   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    // Once a thread has drained it's stack, it should try to steal regions from
    // other threads.
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < parallel_gc_threads; j++) {
--- 2615,2625 ----
  void PSParallelCompact::enqueue_region_stealing_tasks(
                                       GCTaskQueue* q,
                                       ParallelTaskTerminator* terminator_ptr,
                                       uint parallel_gc_threads) {
!   GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);

    // Once a thread has drained it's stack, it should try to steal regions from
    // other threads.
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < parallel_gc_threads; j++) {

*** 2661,2671 ****
  }
  #endif // #ifdef ASSERT

  void PSParallelCompact::compact() {
    // trace("5");
!   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();
--- 2663,2673 ----
  }
  #endif // #ifdef ASSERT

  void PSParallelCompact::compact() {
    // trace("5");
!   GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    PSOldGen* old_gen = heap->old_gen();
    old_gen->start_array()->reset();
    uint parallel_gc_threads = heap->gc_task_manager()->workers();

*** 2677,2687 ****
    enqueue_region_draining_tasks(q, active_gc_threads);
    enqueue_dense_prefix_tasks(q, active_gc_threads);
    enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

    {
!     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

      gc_task_manager()->execute_and_wait(q);

  #ifdef ASSERT
      // Verify that all regions have been processed before the deferred updates.
--- 2679,2689 ----
    enqueue_region_draining_tasks(q, active_gc_threads);
    enqueue_dense_prefix_tasks(q, active_gc_threads);
    enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

    {
!     GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);

      gc_task_manager()->execute_and_wait(q);

  #ifdef ASSERT
      // Verify that all regions have been processed before the deferred updates.

*** 2691,2701 ****
  #endif
    }

    {
      // Update the deferred objects, if any.  Any compaction manager can be used.
!     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
      ParCompactionManager* cm = ParCompactionManager::manager_array(0);
      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
        update_deferred_objects(cm, SpaceId(id));
      }
    }
--- 2693,2703 ----
  #endif
    }

    {
      // Update the deferred objects, if any.  Any compaction manager can be used.
!     GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
      ParCompactionManager* cm = ParCompactionManager::manager_array(0);
      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
        update_deferred_objects(cm, SpaceId(id));
      }
    }