< prev index next >

src/share/vm/gc/parallel/psParallelCompact.cpp

Print this page
rev 11778 : [mq]: service.patch


2042   ObjectStartArray* const start_array = old_gen->start_array();
2043   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2044     start_array->allocate_block(p);
2045   }
2046 
2047   // Could update the promoted average here, but it is not typically updated at
2048   // full GCs and the value to use is unclear.  Something like
2049   //
2050   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2051 
2052   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2053   return true;
2054 }
2055 
// Accessor for the process-wide GCTaskManager used to schedule parallel GC
// work; simply forwards to ParallelScavengeHeap. The assert guarantees the
// manager was created before any caller asks for it.
2056 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2057   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2058     "shouldn't return NULL");
2059   return ParallelScavengeHeap::gc_task_manager();
2060 }
2061 




















2062 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2063                                       bool maximum_heap_compaction,
2064                                       ParallelOldTracer *gc_tracer) {
2065   // Recursively traverse all live objects and mark them
2066   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2067 
2068   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2069   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2070   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2071   TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
2072   ParallelTaskTerminator terminator(active_gc_threads, qset);
2073 
2074   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2075   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2076 
2077   // Need new claim bits before marking starts.
2078   ClassLoaderDataGraph::clear_claimed_marks();
2079 
2080   {
2081     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2082 
2083     ParallelScavengeHeap::ParStrongRootsScope psrs;
2084 
2085     GCTaskQueue* q = GCTaskQueue::create();
2086 
2087     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2088     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2089     // We scan the thread roots in parallel
2090     Threads::create_thread_roots_marking_tasks(q);

2091     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2092     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2093     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2094     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2095     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2096     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2097     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2098 
2099     if (active_gc_threads > 1) {
2100       for (uint j = 0; j < active_gc_threads; j++) {
2101         q->enqueue(new StealMarkingTask(&terminator));
2102       }
2103     }
2104 
2105     gc_task_manager()->execute_and_wait(q);
2106   }
2107 
2108   // Process reference objects found during marking
2109   {
2110     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);




2042   ObjectStartArray* const start_array = old_gen->start_array();
2043   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2044     start_array->allocate_block(p);
2045   }
2046 
2047   // Could update the promoted average here, but it is not typically updated at
2048   // full GCs and the value to use is unclear.  Something like
2049   //
2050   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2051 
2052   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2053   return true;
2054 }
2055 
// Accessor for the process-wide GCTaskManager used to schedule parallel GC
// work; simply forwards to ParallelScavengeHeap. The assert guarantees the
// manager was created before any caller asks for it.
2056 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2057   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2058     "shouldn't return NULL");
2059   return ParallelScavengeHeap::gc_task_manager();
2060 }
2061 
// ThreadClosure that enqueues one ThreadRootsMarkingTask per Java or VM
// thread onto the supplied GCTaskQueue, so thread roots can be marked in
// parallel during the marking phase. Threads of any other kind are skipped.
// Introduced by this change to replace
// Threads::create_thread_roots_marking_tasks(q) with a
// Threads::threads_do(&cl) traversal (see marking_phase below).
2062 class CreateThreadRootsMarkingTasksClosure : public ThreadClosure {
2063 
2064 private:
2065   GCTaskQueue* _queue;   // not owned; tasks are enqueued onto the caller's queue
2066 
2067 public:
2068 
2069   CreateThreadRootsMarkingTasksClosure(GCTaskQueue* q) : _queue(q) {
2070   }
2071 
2072   // Called once per thread by Threads::threads_do(); dispatches on the
2073   // concrete thread kind to pick the matching ThreadRootsMarkingTask ctor.
2074   void do_thread(Thread* thread) {
2075     if (thread->is_Java_thread()) {
2076       _queue->enqueue(new ThreadRootsMarkingTask((JavaThread*) thread));
2077     } else if (thread->is_VM_thread()) {
2078       _queue->enqueue(new ThreadRootsMarkingTask((VMThread*) thread));
2079     }
2080   }
2081 
2082 };
2081 
2082 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2083                                       bool maximum_heap_compaction,
2084                                       ParallelOldTracer *gc_tracer) {
2085   // Recursively traverse all live objects and mark them
2086   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2087 
2088   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2089   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2090   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2091   TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
2092   ParallelTaskTerminator terminator(active_gc_threads, qset);
2093 
2094   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2095   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2096 
2097   // Need new claim bits before marking starts.
2098   ClassLoaderDataGraph::clear_claimed_marks();
2099 
2100   {
2101     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2102 
2103     ParallelScavengeHeap::ParStrongRootsScope psrs;
2104 
2105     GCTaskQueue* q = GCTaskQueue::create();
2106 
2107     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2108     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2109     // We scan the thread roots in parallel
2110     CreateThreadRootsMarkingTasksClosure cl(q);
2111     Threads::threads_do(&cl);
2112     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2113     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2114     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2115     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2116     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2117     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2118     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2119 
2120     if (active_gc_threads > 1) {
2121       for (uint j = 0; j < active_gc_threads; j++) {
2122         q->enqueue(new StealMarkingTask(&terminator));
2123       }
2124     }
2125 
2126     gc_task_manager()->execute_and_wait(q);
2127   }
2128 
2129   // Process reference objects found during marking
2130   {
2131     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);


< prev index next >