
src/hotspot/share/gc/parallel/psParallelCompact.cpp

Old version:

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  GCTaskQueue* _q;

public:
  PCAddThreadRootsMarkingTaskClosure(GCTaskQueue* q) : _q(q) { }
  void do_thread(Thread* t) {
    _q->enqueue(new ThreadRootsMarkingTask(t));
  }
};

void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                      bool maximum_heap_compaction,
                                      ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);

  PCMarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    ParallelScavengeHeap::ParStrongRootsScope psrs;

    GCTaskQueue* q = GCTaskQueue::create();

    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
    // We scan the thread roots in parallel
    PCAddThreadRootsMarkingTaskClosure cl(q);
    Threads::java_threads_and_vm_thread_do(&cl);
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));

    if (active_gc_threads > 1) {
      for (uint j = 0; j < active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }

    gc_task_manager()->execute_and_wait(q);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    if (ref_processor()->processing_is_mt()) {
      ref_processor()->set_active_mt_degree(active_gc_threads);

      RefProcTaskExecutor task_executor;
      stats = ref_processor()->process_discovered_references(
        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
        &task_executor, &pt);
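
For context on the StealMarkingTask instances enqueued above: each one parks an idle worker in a work-stealing loop that only exits once ParallelTaskTerminator::offer_termination() succeeds, i.e. once every active worker has run out of work to steal. The sketch below shows the general offer/retract shape of such a protocol. It is a toy illustration, not JDK code; every name in it is invented here, and the real protocol is considerably more careful (spin/yield/sleep phases, and avoiding the race where one worker terminates while another retracts its offer).

#include <atomic>
#include <thread>

// Toy termination protocol: a worker offers termination once its queues
// are empty and terminates only when all workers have offered; a worker
// that spots new stealable work retracts its offer and resumes.
class ToyTerminator {
  std::atomic<unsigned> _offers{0};
  const unsigned _n_workers;
public:
  explicit ToyTerminator(unsigned n_workers) : _n_workers(n_workers) {}

  template <typename Peek>          // Peek: () -> bool, true if work appeared
  bool offer_termination(Peek has_work) {
    _offers.fetch_add(1);
    for (;;) {
      if (_offers.load() == _n_workers) {
        return true;                // all workers idle: terminate
      }
      if (has_work()) {
        _offers.fetch_sub(1);       // retract the offer, go back to stealing
        return false;
      }
      std::this_thread::yield();    // real code spins, then sleeps
    }
  }
};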
[...]
      out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
      for (size_t i = 0; i < histo_len; ++i) {
        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
                   histo[i], 100.0 * histo[i] / region_cnt);
      }
      out->cr();
    }
  }
}
#endif // #ifdef ASSERT

void PSParallelCompact::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  prepare_region_draining_tasks(q, active_gc_threads);
  enqueue_dense_prefix_tasks(q, active_gc_threads);
  enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    gc_task_manager()->execute_and_wait(q);

#ifdef  ASSERT
    // Verify that all regions have been processed before the deferred updates.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }

  {
    // Update the deferred objects, if any.  Any compaction manager can be used.
    GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
    ParCompactionManager* cm = ParCompactionManager::manager_array(0);
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      update_deferred_objects(cm, SpaceId(id));

New version:

class PCAddThreadRootsMarkingTaskClosure : public ThreadClosure {
private:
  GCTaskQueue* _q;

public:
  PCAddThreadRootsMarkingTaskClosure(GCTaskQueue* q) : _q(q) { }
  void do_thread(Thread* t) {
    _q->enqueue(new ThreadRootsMarkingTask(t));
  }
};

void PSParallelCompact::marking_phase(ParCompactionManager* cm,
                                      bool maximum_heap_compaction,
                                      ParallelOldTracer *gc_tracer) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
  TaskTerminator terminator(active_gc_threads, qset);

  PCMarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);

    ParallelScavengeHeap::ParStrongRootsScope psrs;

    GCTaskQueue* q = GCTaskQueue::create();

    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
    // We scan the thread roots in parallel
    PCAddThreadRootsMarkingTaskClosure cl(q);
    Threads::java_threads_and_vm_thread_do(&cl);
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));

    if (active_gc_threads > 1) {
      for (uint j = 0; j < active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(terminator.terminator()));
      }
    }

    gc_task_manager()->execute_and_wait(q);
  }

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

    ReferenceProcessorStats stats;
    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());

    if (ref_processor()->processing_is_mt()) {
      ref_processor()->set_active_mt_degree(active_gc_threads);

      RefProcTaskExecutor task_executor;
      stats = ref_processor()->process_discovered_references(
        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
        &task_executor, &pt);
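
The only functional change in this hunk is the terminator: the stack-allocated ParallelTaskTerminator becomes a TaskTerminator, and the steal tasks now receive terminator.terminator() instead of &terminator. A minimal sketch of the wrapper shape those two call sites imply follows; it is not the JDK declaration — the constructor signature and the terminator() accessor are exactly what the code above uses, but the stub body and the ownership scheme are assumptions of the sketch. (In the actual JDK the wrapper's constructor can also select an alternative terminator implementation, which is the point of the indirection.)

class TaskQueueSetSuper;

class ParallelTaskTerminator {      // stub standing in for the real class
public:
  ParallelTaskTerminator(unsigned n_threads, TaskQueueSetSuper* qset) {}
  bool offer_termination() { return true; }   // placeholder body
};

// Stack-allocated wrapper: owns the underlying terminator and hands it out
// to call sites such as StealMarkingTask via terminator().
class TaskTerminator {
  ParallelTaskTerminator* _terminator;
public:
  TaskTerminator(unsigned n_threads, TaskQueueSetSuper* qset)
    : _terminator(new ParallelTaskTerminator(n_threads, qset)) {}
  ~TaskTerminator() { delete _terminator; }

  ParallelTaskTerminator* terminator() const { return _terminator; }
};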
[...]
      out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
      for (size_t i = 0; i < histo_len; ++i) {
        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
                   histo[i], 100.0 * histo[i] / region_cnt);
      }
      out->cr();
    }
  }
}
#endif // #ifdef ASSERT

void PSParallelCompact::compact() {
  GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  TaskTerminator terminator(active_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  prepare_region_draining_tasks(q, active_gc_threads);
  enqueue_dense_prefix_tasks(q, active_gc_threads);
  enqueue_region_stealing_tasks(q, terminator.terminator(), active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    gc_task_manager()->execute_and_wait(q);

#ifdef  ASSERT
    // Verify that all regions have been processed before the deferred updates.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }

  {
    // Update the deferred objects, if any.  Any compaction manager can be used.
    GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
    ParCompactionManager* cm = ParCompactionManager::manager_array(0);
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      update_deferred_objects(cm, SpaceId(id));
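
Note that enqueue_region_stealing_tasks presumably still takes a ParallelTaskTerminator*; only the type of the local changed, and the wrapper is unwrapped at the call site. Side by side (both call sites verbatim from the two versions above):

// Before: the local is the terminator itself, so its address is passed.
ParallelTaskTerminator terminator(active_gc_threads, qset);
enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

// After: the local is the wrapper, so the wrapped pointer is passed.
TaskTerminator terminator(active_gc_threads, qset);
enqueue_region_stealing_tasks(q, terminator.terminator(), active_gc_threads);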

